| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| 11 #include "webrtc/modules/audio_coding/neteq/neteq_impl.h" | 11 #include "webrtc/modules/audio_coding/neteq/neteq_impl.h" |
| 12 | 12 |
| 13 #include <assert.h> | 13 #include <assert.h> |
| 14 #include <memory.h> // memset | 14 #include <memory.h> // memset |
| 15 | 15 |
| 16 #include <algorithm> | 16 #include <algorithm> |
| 17 | 17 |
| 18 #include "webrtc/base/logging.h" | 18 #include "webrtc/base/logging.h" |
| 19 #include "webrtc/base/safe_conversions.h" |
| 19 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" | 20 #include "webrtc/common_audio/signal_processing/include/signal_processing_librar
y.h" |
| 20 #include "webrtc/modules/audio_coding/codecs/audio_decoder.h" | 21 #include "webrtc/modules/audio_coding/codecs/audio_decoder.h" |
| 21 #include "webrtc/modules/audio_coding/neteq/accelerate.h" | 22 #include "webrtc/modules/audio_coding/neteq/accelerate.h" |
| 22 #include "webrtc/modules/audio_coding/neteq/background_noise.h" | 23 #include "webrtc/modules/audio_coding/neteq/background_noise.h" |
| 23 #include "webrtc/modules/audio_coding/neteq/buffer_level_filter.h" | 24 #include "webrtc/modules/audio_coding/neteq/buffer_level_filter.h" |
| 24 #include "webrtc/modules/audio_coding/neteq/comfort_noise.h" | 25 #include "webrtc/modules/audio_coding/neteq/comfort_noise.h" |
| 25 #include "webrtc/modules/audio_coding/neteq/decision_logic.h" | 26 #include "webrtc/modules/audio_coding/neteq/decision_logic.h" |
| 26 #include "webrtc/modules/audio_coding/neteq/decoder_database.h" | 27 #include "webrtc/modules/audio_coding/neteq/decoder_database.h" |
| 27 #include "webrtc/modules/audio_coding/neteq/defines.h" | 28 #include "webrtc/modules/audio_coding/neteq/defines.h" |
| 28 #include "webrtc/modules/audio_coding/neteq/delay_manager.h" | 29 #include "webrtc/modules/audio_coding/neteq/delay_manager.h" |
| (...skipping 68 matching lines...) |
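
The hunk above adds `webrtc/base/safe_conversions.h`; the rest of the CL uses `rtc::checked_cast` to narrow the now-`size_t` sample counts back to `int` where a signed value is still required. A minimal sketch of that usage pattern, assuming only that `checked_cast` behaves like `static_cast` plus a fatal range check (the helper name below is illustrative, not part of the CL):

```cpp
#include <cstddef>

#include "webrtc/base/safe_conversions.h"

// Illustrative helper mirroring the pattern used throughout this CL: sample
// counts are kept as size_t internally and narrowed with rtc::checked_cast
// only where a signed int is still expected by an older interface.
int SamplesAsInt(size_t output_size_samples) {
  // Behaves like static_cast<int>, but aborts if the value does not fit in
  // the destination type (assumption based on the header's documented intent).
  return rtc::checked_cast<int>(output_size_samples);
}
```
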
| 97 decoded_packet_timestamp_(0) { | 98 decoded_packet_timestamp_(0) { |
| 98 LOG(LS_INFO) << "NetEq config: " << config.ToString(); | 99 LOG(LS_INFO) << "NetEq config: " << config.ToString(); |
| 99 int fs = config.sample_rate_hz; | 100 int fs = config.sample_rate_hz; |
| 100 if (fs != 8000 && fs != 16000 && fs != 32000 && fs != 48000) { | 101 if (fs != 8000 && fs != 16000 && fs != 32000 && fs != 48000) { |
| 101 LOG(LS_ERROR) << "Sample rate " << fs << " Hz not supported. " << | 102 LOG(LS_ERROR) << "Sample rate " << fs << " Hz not supported. " << |
| 102 "Changing to 8000 Hz."; | 103 "Changing to 8000 Hz."; |
| 103 fs = 8000; | 104 fs = 8000; |
| 104 } | 105 } |
| 105 fs_hz_ = fs; | 106 fs_hz_ = fs; |
| 106 fs_mult_ = fs / 8000; | 107 fs_mult_ = fs / 8000; |
| 107 output_size_samples_ = kOutputSizeMs * 8 * fs_mult_; | 108 output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_); |
| 108 decoder_frame_length_ = 3 * output_size_samples_; | 109 decoder_frame_length_ = 3 * output_size_samples_; |
| 109 WebRtcSpl_Init(); | 110 WebRtcSpl_Init(); |
| 110 if (create_components) { | 111 if (create_components) { |
| 111 SetSampleRateAndChannels(fs, 1); // Default is 1 channel. | 112 SetSampleRateAndChannels(fs, 1); // Default is 1 channel. |
| 112 } | 113 } |
| 113 } | 114 } |
| 114 | 115 |
| 115 NetEqImpl::~NetEqImpl() = default; | 116 NetEqImpl::~NetEqImpl() = default; |
| 116 | 117 |
| 117 int NetEqImpl::InsertPacket(const WebRtcRTPHeader& rtp_header, | 118 int NetEqImpl::InsertPacket(const WebRtcRTPHeader& rtp_header, |
| (...skipping 29 matching lines...) |
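
The constructor hunk above reduces to a small piece of sample-count arithmetic. The sketch below restates it stand-alone, assuming `kOutputSizeMs` is NetEq's 10 ms output block size (the constant is defined outside this diff):

```cpp
#include <cstddef>

// Stand-alone sketch of the bookkeeping in the NetEqImpl constructor,
// assuming kOutputSizeMs == 10 (defined elsewhere in neteq_impl).
size_t OutputSizeSamples(int fs_hz) {
  const int kOutputSizeMs = 10;      // assumed value of the NetEq constant
  const int fs_mult = fs_hz / 8000;  // 1, 2, 4 or 6 for 8/16/32/48 kHz
  // 8 samples per millisecond at 8 kHz, scaled by the rate multiplier:
  // a 10 ms block is 80 samples at 8 kHz and 480 samples at 48 kHz.
  return static_cast<size_t>(kOutputSizeMs * 8 * fs_mult);
}
// decoder_frame_length_ then starts out as three such blocks (30 ms).
```
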
| 147 rtp_header, kSyncPayload, sizeof(kSyncPayload), receive_timestamp, true); | 148 rtp_header, kSyncPayload, sizeof(kSyncPayload), receive_timestamp, true); |
| 148 | 149 |
| 149 if (error != 0) { | 150 if (error != 0) { |
| 150 error_code_ = error; | 151 error_code_ = error; |
| 151 return kFail; | 152 return kFail; |
| 152 } | 153 } |
| 153 return kOK; | 154 return kOK; |
| 154 } | 155 } |
| 155 | 156 |
| 156 int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio, | 157 int NetEqImpl::GetAudio(size_t max_length, int16_t* output_audio, |
| 157 int* samples_per_channel, int* num_channels, | 158 size_t* samples_per_channel, int* num_channels, |
| 158 NetEqOutputType* type) { | 159 NetEqOutputType* type) { |
| 159 CriticalSectionScoped lock(crit_sect_.get()); | 160 CriticalSectionScoped lock(crit_sect_.get()); |
| 160 LOG(LS_VERBOSE) << "GetAudio"; | 161 LOG(LS_VERBOSE) << "GetAudio"; |
| 161 int error = GetAudioInternal(max_length, output_audio, samples_per_channel, | 162 int error = GetAudioInternal(max_length, output_audio, samples_per_channel, |
| 162 num_channels); | 163 num_channels); |
| 163 LOG(LS_VERBOSE) << "Produced " << *samples_per_channel << | 164 LOG(LS_VERBOSE) << "Produced " << *samples_per_channel << |
| 164 " samples/channel for " << *num_channels << " channel(s)"; | 165 " samples/channel for " << *num_channels << " channel(s)"; |
| 165 if (error != 0) { | 166 if (error != 0) { |
| 166 error_code_ = error; | 167 error_code_ = error; |
| 167 return kFail; | 168 return kFail; |
| (...skipping 130 matching lines...) |
| 298 // Deprecated. | 299 // Deprecated. |
| 299 // TODO(henrik.lundin) Delete. | 300 // TODO(henrik.lundin) Delete. |
| 300 NetEqPlayoutMode NetEqImpl::PlayoutMode() const { | 301 NetEqPlayoutMode NetEqImpl::PlayoutMode() const { |
| 301 CriticalSectionScoped lock(crit_sect_.get()); | 302 CriticalSectionScoped lock(crit_sect_.get()); |
| 302 return playout_mode_; | 303 return playout_mode_; |
| 303 } | 304 } |
| 304 | 305 |
| 305 int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) { | 306 int NetEqImpl::NetworkStatistics(NetEqNetworkStatistics* stats) { |
| 306 CriticalSectionScoped lock(crit_sect_.get()); | 307 CriticalSectionScoped lock(crit_sect_.get()); |
| 307 assert(decoder_database_.get()); | 308 assert(decoder_database_.get()); |
| 308 const int total_samples_in_buffers = | 309 const size_t total_samples_in_buffers = |
| 309 packet_buffer_->NumSamplesInBuffer(decoder_database_.get(), | 310 packet_buffer_->NumSamplesInBuffer(decoder_database_.get(), |
| 310 decoder_frame_length_) + | 311 decoder_frame_length_) + |
| 311 static_cast<int>(sync_buffer_->FutureLength()); | 312 sync_buffer_->FutureLength(); |
| 312 assert(delay_manager_.get()); | 313 assert(delay_manager_.get()); |
| 313 assert(decision_logic_.get()); | 314 assert(decision_logic_.get()); |
| 314 stats_.GetNetworkStatistics(fs_hz_, total_samples_in_buffers, | 315 stats_.GetNetworkStatistics(fs_hz_, total_samples_in_buffers, |
| 315 decoder_frame_length_, *delay_manager_.get(), | 316 decoder_frame_length_, *delay_manager_.get(), |
| 316 *decision_logic_.get(), stats); | 317 *decision_logic_.get(), stats); |
| 317 return 0; | 318 return 0; |
| 318 } | 319 } |
| 319 | 320 |
| 320 void NetEqImpl::WaitingTimes(std::vector<int>* waiting_times) { | 321 void NetEqImpl::WaitingTimes(std::vector<int>* waiting_times) { |
| 321 CriticalSectionScoped lock(crit_sect_.get()); | 322 CriticalSectionScoped lock(crit_sect_.get()); |
| (...skipping 274 matching lines...) |
| 596 assert(decoder); // Should always get a valid object, since we have | 597 assert(decoder); // Should always get a valid object, since we have |
| 597 // already checked that the payload types are known. | 598 // already checked that the payload types are known. |
| 598 decoder->IncomingPacket(packet_list.front()->payload, | 599 decoder->IncomingPacket(packet_list.front()->payload, |
| 599 packet_list.front()->payload_length, | 600 packet_list.front()->payload_length, |
| 600 packet_list.front()->header.sequenceNumber, | 601 packet_list.front()->header.sequenceNumber, |
| 601 packet_list.front()->header.timestamp, | 602 packet_list.front()->header.timestamp, |
| 602 receive_timestamp); | 603 receive_timestamp); |
| 603 } | 604 } |
| 604 | 605 |
| 605 // Insert packets in buffer. | 606 // Insert packets in buffer. |
| 606 int temp_bufsize = packet_buffer_->NumPacketsInBuffer(); | 607 size_t temp_bufsize = packet_buffer_->NumPacketsInBuffer(); |
| 607 ret = packet_buffer_->InsertPacketList( | 608 ret = packet_buffer_->InsertPacketList( |
| 608 &packet_list, | 609 &packet_list, |
| 609 *decoder_database_, | 610 *decoder_database_, |
| 610 &current_rtp_payload_type_, | 611 &current_rtp_payload_type_, |
| 611 &current_cng_rtp_payload_type_); | 612 &current_cng_rtp_payload_type_); |
| 612 if (ret == PacketBuffer::kFlushed) { | 613 if (ret == PacketBuffer::kFlushed) { |
| 613 // Reset DSP timestamp etc. if packet buffer flushed. | 614 // Reset DSP timestamp etc. if packet buffer flushed. |
| 614 new_codec_ = true; | 615 new_codec_ = true; |
| 615 update_sample_rate_and_channels = true; | 616 update_sample_rate_and_channels = true; |
| 616 } else if (ret != PacketBuffer::kOK) { | 617 } else if (ret != PacketBuffer::kOK) { |
| (...skipping 41 matching lines...) |
| 658 assert(dec_info); // Already checked that the payload type is known. | 659 assert(dec_info); // Already checked that the payload type is known. |
| 659 delay_manager_->LastDecoderType(dec_info->codec_type); | 660 delay_manager_->LastDecoderType(dec_info->codec_type); |
| 660 if (delay_manager_->last_pack_cng_or_dtmf() == 0) { | 661 if (delay_manager_->last_pack_cng_or_dtmf() == 0) { |
| 661 // Calculate the total speech length carried in each packet. | 662 // Calculate the total speech length carried in each packet. |
| 662 temp_bufsize = packet_buffer_->NumPacketsInBuffer() - temp_bufsize; | 663 temp_bufsize = packet_buffer_->NumPacketsInBuffer() - temp_bufsize; |
| 663 temp_bufsize *= decoder_frame_length_; | 664 temp_bufsize *= decoder_frame_length_; |
| 664 | 665 |
| 665 if ((temp_bufsize > 0) && | 666 if ((temp_bufsize > 0) && |
| 666 (temp_bufsize != decision_logic_->packet_length_samples())) { | 667 (temp_bufsize != decision_logic_->packet_length_samples())) { |
| 667 decision_logic_->set_packet_length_samples(temp_bufsize); | 668 decision_logic_->set_packet_length_samples(temp_bufsize); |
| 668 delay_manager_->SetPacketAudioLength((1000 * temp_bufsize) / fs_hz_); | 669 delay_manager_->SetPacketAudioLength( |
| | 670 static_cast<int>((1000 * temp_bufsize) / fs_hz_)); |
| 669 } | 671 } |
| 670 | 672 |
| 671 // Update statistics. | 673 // Update statistics. |
| 672 if ((int32_t) (main_header.timestamp - timestamp_) >= 0 && | 674 if ((int32_t) (main_header.timestamp - timestamp_) >= 0 && |
| 673 !new_codec_) { | 675 !new_codec_) { |
| 674 // Only update statistics if incoming packet is not older than last played | 676 // Only update statistics if incoming packet is not older than last played |
| 675 // out packet, and if new codec flag is not set. | 677 // out packet, and if new codec flag is not set. |
| 676 delay_manager_->Update(main_header.sequenceNumber, main_header.timestamp, | 678 delay_manager_->Update(main_header.sequenceNumber, main_header.timestamp, |
| 677 fs_hz_); | 679 fs_hz_); |
| 678 } | 680 } |
| 679 } else if (delay_manager_->last_pack_cng_or_dtmf() == -1) { | 681 } else if (delay_manager_->last_pack_cng_or_dtmf() == -1) { |
| 680 // This is first "normal" packet after CNG or DTMF. | 682 // This is first "normal" packet after CNG or DTMF. |
| 681 // Reset packet time counter and measure time until next packet, | 683 // Reset packet time counter and measure time until next packet, |
| 682 // but don't update statistics. | 684 // but don't update statistics. |
| 683 delay_manager_->set_last_pack_cng_or_dtmf(0); | 685 delay_manager_->set_last_pack_cng_or_dtmf(0); |
| 684 delay_manager_->ResetPacketIatCount(); | 686 delay_manager_->ResetPacketIatCount(); |
| 685 } | 687 } |
| 686 return 0; | 688 return 0; |
| 687 } | 689 } |
| 688 | 690 |
| 689 int NetEqImpl::GetAudioInternal(size_t max_length, | 691 int NetEqImpl::GetAudioInternal(size_t max_length, |
| 690 int16_t* output, | 692 int16_t* output, |
| 691 int* samples_per_channel, | 693 size_t* samples_per_channel, |
| 692 int* num_channels) { | 694 int* num_channels) { |
| 693 PacketList packet_list; | 695 PacketList packet_list; |
| 694 DtmfEvent dtmf_event; | 696 DtmfEvent dtmf_event; |
| 695 Operations operation; | 697 Operations operation; |
| 696 bool play_dtmf; | 698 bool play_dtmf; |
| 697 int return_value = GetDecision(&operation, &packet_list, &dtmf_event, | 699 int return_value = GetDecision(&operation, &packet_list, &dtmf_event, |
| 698 &play_dtmf); | 700 &play_dtmf); |
| 699 if (return_value != 0) { | 701 if (return_value != 0) { |
| 700 assert(false); | 702 assert(false); |
| 701 last_mode_ = kModeError; | 703 last_mode_ = kModeError; |
| 702 return return_value; | 704 return return_value; |
| 703 } | 705 } |
| 704 LOG(LS_VERBOSE) << "GetDecision returned operation=" << operation << | 706 LOG(LS_VERBOSE) << "GetDecision returned operation=" << operation << |
| 705 " and " << packet_list.size() << " packet(s)"; | 707 " and " << packet_list.size() << " packet(s)"; |
| 706 | 708 |
| 707 AudioDecoder::SpeechType speech_type; | 709 AudioDecoder::SpeechType speech_type; |
| 708 int length = 0; | 710 int length = 0; |
| 709 int decode_return_value = Decode(&packet_list, &operation, | 711 int decode_return_value = Decode(&packet_list, &operation, |
| 710 &length, &speech_type); | 712 &length, &speech_type); |
| 711 | 713 |
| 712 assert(vad_.get()); | 714 assert(vad_.get()); |
| 713 bool sid_frame_available = | 715 bool sid_frame_available = |
| 714 (operation == kRfc3389Cng && !packet_list.empty()); | 716 (operation == kRfc3389Cng && !packet_list.empty()); |
| 715 vad_->Update(decoded_buffer_.get(), length, speech_type, | 717 vad_->Update(decoded_buffer_.get(), static_cast<size_t>(length), speech_type, |
| 716 sid_frame_available, fs_hz_); | 718 sid_frame_available, fs_hz_); |
| 717 | 719 |
| 718 algorithm_buffer_->Clear(); | 720 algorithm_buffer_->Clear(); |
| 719 switch (operation) { | 721 switch (operation) { |
| 720 case kNormal: { | 722 case kNormal: { |
| 721 DoNormal(decoded_buffer_.get(), length, speech_type, play_dtmf); | 723 DoNormal(decoded_buffer_.get(), length, speech_type, play_dtmf); |
| 722 break; | 724 break; |
| 723 } | 725 } |
| 724 case kMerge: { | 726 case kMerge: { |
| 725 DoMerge(decoded_buffer_.get(), length, speech_type, play_dtmf); | 727 DoMerge(decoded_buffer_.get(), length, speech_type, play_dtmf); |
| (...skipping 78 matching lines...) |
| 804 // Copy from |algorithm_buffer| to |sync_buffer_|. | 806 // Copy from |algorithm_buffer| to |sync_buffer_|. |
| 805 sync_buffer_->PushBack(*algorithm_buffer_); | 807 sync_buffer_->PushBack(*algorithm_buffer_); |
| 806 | 808 |
| 807 // Extract data from |sync_buffer_| to |output|. | 809 // Extract data from |sync_buffer_| to |output|. |
| 808 size_t num_output_samples_per_channel = output_size_samples_; | 810 size_t num_output_samples_per_channel = output_size_samples_; |
| 809 size_t num_output_samples = output_size_samples_ * sync_buffer_->Channels(); | 811 size_t num_output_samples = output_size_samples_ * sync_buffer_->Channels(); |
| 810 if (num_output_samples > max_length) { | 812 if (num_output_samples > max_length) { |
| 811 LOG(LS_WARNING) << "Output array is too short. " << max_length << " < " << | 813 LOG(LS_WARNING) << "Output array is too short. " << max_length << " < " << |
| 812 output_size_samples_ << " * " << sync_buffer_->Channels(); | 814 output_size_samples_ << " * " << sync_buffer_->Channels(); |
| 813 num_output_samples = max_length; | 815 num_output_samples = max_length; |
| 814 num_output_samples_per_channel = static_cast<int>( | 816 num_output_samples_per_channel = max_length / sync_buffer_->Channels(); |
| 815 max_length / sync_buffer_->Channels()); | |
| 816 } | 817 } |
| 817 const int samples_from_sync = | 818 const size_t samples_from_sync = |
| 818 static_cast<int>(sync_buffer_->GetNextAudioInterleaved( | 819 sync_buffer_->GetNextAudioInterleaved(num_output_samples_per_channel, |
| 819 num_output_samples_per_channel, output)); | 820 output); |
| 820 *num_channels = static_cast<int>(sync_buffer_->Channels()); | 821 *num_channels = static_cast<int>(sync_buffer_->Channels()); |
| 821 LOG(LS_VERBOSE) << "Sync buffer (" << *num_channels << " channel(s)):" << | 822 LOG(LS_VERBOSE) << "Sync buffer (" << *num_channels << " channel(s)):" << |
| 822 " insert " << algorithm_buffer_->Size() << " samples, extract " << | 823 " insert " << algorithm_buffer_->Size() << " samples, extract " << |
| 823 samples_from_sync << " samples"; | 824 samples_from_sync << " samples"; |
| 824 if (samples_from_sync != output_size_samples_) { | 825 if (samples_from_sync != output_size_samples_) { |
| 825 LOG(LS_ERROR) << "samples_from_sync (" << samples_from_sync | 826 LOG(LS_ERROR) << "samples_from_sync (" << samples_from_sync |
| 826 << ") != output_size_samples_ (" << output_size_samples_ | 827 << ") != output_size_samples_ (" << output_size_samples_ |
| 827 << ")"; | 828 << ")"; |
| 828 // TODO(minyue): treatment of under-run, filling zeros | 829 // TODO(minyue): treatment of under-run, filling zeros |
| 829 memset(output, 0, num_output_samples * sizeof(int16_t)); | 830 memset(output, 0, num_output_samples * sizeof(int16_t)); |
| (...skipping 85 matching lines...) |
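
The extraction hunk above clamps the request to the caller's buffer before pulling audio out of the sync buffer. A minimal sketch of that clamping, with hypothetical names (the real code reads the values from `sync_buffer_` and `output_size_samples_`):

```cpp
#include <cstddef>

// Sketch of the output clamping in GetAudioInternal: if the caller's buffer
// cannot hold a full output block for every channel, truncate the request to
// whatever number of whole frames fits.
void ClampOutputRequest(size_t output_size_samples, size_t channels,
                        size_t max_length, size_t* num_samples,
                        size_t* samples_per_channel) {
  *samples_per_channel = output_size_samples;
  *num_samples = output_size_samples * channels;
  if (*num_samples > max_length) {
    *num_samples = max_length;
    *samples_per_channel = max_length / channels;
  }
}
```
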
| 915 } | 916 } |
| 916 | 917 |
| 917 assert(expand_.get()); | 918 assert(expand_.get()); |
| 918 const int samples_left = static_cast<int>(sync_buffer_->FutureLength() - | 919 const int samples_left = static_cast<int>(sync_buffer_->FutureLength() - |
| 919 expand_->overlap_length()); | 920 expand_->overlap_length()); |
| 920 if (last_mode_ == kModeAccelerateSuccess || | 921 if (last_mode_ == kModeAccelerateSuccess || |
| 921 last_mode_ == kModeAccelerateLowEnergy || | 922 last_mode_ == kModeAccelerateLowEnergy || |
| 922 last_mode_ == kModePreemptiveExpandSuccess || | 923 last_mode_ == kModePreemptiveExpandSuccess || |
| 923 last_mode_ == kModePreemptiveExpandLowEnergy) { | 924 last_mode_ == kModePreemptiveExpandLowEnergy) { |
| 924 // Subtract (samples_left + output_size_samples_) from sampleMemory. | 925 // Subtract (samples_left + output_size_samples_) from sampleMemory. |
| 925 decision_logic_->AddSampleMemory(-(samples_left + output_size_samples_)); | 926 decision_logic_->AddSampleMemory( |
| | 927 -(samples_left + rtc::checked_cast<int>(output_size_samples_))); |
| 926 } | 928 } |
| 927 | 929 |
| 928 // Check if it is time to play a DTMF event. | 930 // Check if it is time to play a DTMF event. |
| 929 if (dtmf_buffer_->GetEvent( | 931 if (dtmf_buffer_->GetEvent( |
| 930 static_cast<uint32_t>( | 932 static_cast<uint32_t>( |
| 931 end_timestamp + decision_logic_->generated_noise_samples()), | 933 end_timestamp + decision_logic_->generated_noise_samples()), |
| 932 dtmf_event)) { | 934 dtmf_event)) { |
| 933 *play_dtmf = true; | 935 *play_dtmf = true; |
| 934 } | 936 } |
| 935 | 937 |
| 936 // Get instruction. | 938 // Get instruction. |
| 937 assert(sync_buffer_.get()); | 939 assert(sync_buffer_.get()); |
| 938 assert(expand_.get()); | 940 assert(expand_.get()); |
| 939 *operation = decision_logic_->GetDecision(*sync_buffer_, | 941 *operation = decision_logic_->GetDecision(*sync_buffer_, |
| 940 *expand_, | 942 *expand_, |
| 941 decoder_frame_length_, | 943 decoder_frame_length_, |
| 942 header, | 944 header, |
| 943 last_mode_, | 945 last_mode_, |
| 944 *play_dtmf, | 946 *play_dtmf, |
| 945 &reset_decoder_); | 947 &reset_decoder_); |
| 946 | 948 |
| 947 // Check if we already have enough samples in the |sync_buffer_|. If so, | 949 // Check if we already have enough samples in the |sync_buffer_|. If so, |
| 948 // change decision to normal, unless the decision was merge, accelerate, or | 950 // change decision to normal, unless the decision was merge, accelerate, or |
| 949 // preemptive expand. | 951 // preemptive expand. |
| 950 if (samples_left >= output_size_samples_ && *operation != kMerge && | 952 if (samples_left >= rtc::checked_cast<int>(output_size_samples_) && |
| 951 *operation != kAccelerate && *operation != kFastAccelerate && | 953 *operation != kMerge && |
| | 954 *operation != kAccelerate && |
| | 955 *operation != kFastAccelerate && |
| 952 *operation != kPreemptiveExpand) { | 956 *operation != kPreemptiveExpand) { |
| 953 *operation = kNormal; | 957 *operation = kNormal; |
| 954 return 0; | 958 return 0; |
| 955 } | 959 } |
| 956 | 960 |
| 957 decision_logic_->ExpandDecision(*operation); | 961 decision_logic_->ExpandDecision(*operation); |
| 958 | 962 |
| 959 // Check conditions for reset. | 963 // Check conditions for reset. |
| 960 if (new_codec_ || *operation == kUndefined) { | 964 if (new_codec_ || *operation == kUndefined) { |
| 961 // The only valid reason to get kUndefined is that new_codec_ is set. | 965 // The only valid reason to get kUndefined is that new_codec_ is set. |
| (...skipping 27 matching lines...) |
| 989 // new value. | 993 // new value. |
| 990 sync_buffer_->IncreaseEndTimestamp(timestamp_ - end_timestamp); | 994 sync_buffer_->IncreaseEndTimestamp(timestamp_ - end_timestamp); |
| 991 end_timestamp = timestamp_; | 995 end_timestamp = timestamp_; |
| 992 new_codec_ = false; | 996 new_codec_ = false; |
| 993 decision_logic_->SoftReset(); | 997 decision_logic_->SoftReset(); |
| 994 buffer_level_filter_->Reset(); | 998 buffer_level_filter_->Reset(); |
| 995 delay_manager_->Reset(); | 999 delay_manager_->Reset(); |
| 996 stats_.ResetMcu(); | 1000 stats_.ResetMcu(); |
| 997 } | 1001 } |
| 998 | 1002 |
| 999 int required_samples = output_size_samples_; | 1003 size_t required_samples = output_size_samples_; |
| 1000 const int samples_10_ms = 80 * fs_mult_; | 1004 const size_t samples_10_ms = static_cast<size_t>(80 * fs_mult_); |
| 1001 const int samples_20_ms = 2 * samples_10_ms; | 1005 const size_t samples_20_ms = 2 * samples_10_ms; |
| 1002 const int samples_30_ms = 3 * samples_10_ms; | 1006 const size_t samples_30_ms = 3 * samples_10_ms; |
| 1003 | 1007 |
| 1004 switch (*operation) { | 1008 switch (*operation) { |
| 1005 case kExpand: { | 1009 case kExpand: { |
| 1006 timestamp_ = end_timestamp; | 1010 timestamp_ = end_timestamp; |
| 1007 return 0; | 1011 return 0; |
| 1008 } | 1012 } |
| 1009 case kRfc3389CngNoPacket: | 1013 case kRfc3389CngNoPacket: |
| 1010 case kCodecInternalCng: { | 1014 case kCodecInternalCng: { |
| 1011 return 0; | 1015 return 0; |
| 1012 } | 1016 } |
| 1013 case kDtmf: { | 1017 case kDtmf: { |
| 1014 // TODO(hlundin): Write test for this. | 1018 // TODO(hlundin): Write test for this. |
| 1015 // Update timestamp. | 1019 // Update timestamp. |
| 1016 timestamp_ = end_timestamp; | 1020 timestamp_ = end_timestamp; |
| 1017 if (decision_logic_->generated_noise_samples() > 0 && | 1021 if (decision_logic_->generated_noise_samples() > 0 && |
| 1018 last_mode_ != kModeDtmf) { | 1022 last_mode_ != kModeDtmf) { |
| 1019 // Make a jump in timestamp due to the recently played comfort noise. | 1023 // Make a jump in timestamp due to the recently played comfort noise. |
| 1020 uint32_t timestamp_jump = | 1024 uint32_t timestamp_jump = |
| 1021 static_cast<uint32_t>(decision_logic_->generated_noise_samples()); | 1025 static_cast<uint32_t>(decision_logic_->generated_noise_samples()); |
| 1022 sync_buffer_->IncreaseEndTimestamp(timestamp_jump); | 1026 sync_buffer_->IncreaseEndTimestamp(timestamp_jump); |
| 1023 timestamp_ += timestamp_jump; | 1027 timestamp_ += timestamp_jump; |
| 1024 } | 1028 } |
| 1025 decision_logic_->set_generated_noise_samples(0); | 1029 decision_logic_->set_generated_noise_samples(0); |
| 1026 return 0; | 1030 return 0; |
| 1027 } | 1031 } |
| 1028 case kAccelerate: | 1032 case kAccelerate: |
| 1029 case kFastAccelerate: { | 1033 case kFastAccelerate: { |
| 1030 // In order to do an accelerate we need at least 30 ms of audio data. | 1034 // In order to do an accelerate we need at least 30 ms of audio data. |
| 1031 if (samples_left >= samples_30_ms) { | 1035 if (samples_left >= static_cast<int>(samples_30_ms)) { |
| 1032 // Already have enough data, so we do not need to extract any more. | 1036 // Already have enough data, so we do not need to extract any more. |
| 1033 decision_logic_->set_sample_memory(samples_left); | 1037 decision_logic_->set_sample_memory(samples_left); |
| 1034 decision_logic_->set_prev_time_scale(true); | 1038 decision_logic_->set_prev_time_scale(true); |
| 1035 return 0; | 1039 return 0; |
| 1036 } else if (samples_left >= samples_10_ms && | 1040 } else if (samples_left >= static_cast<int>(samples_10_ms) && |
| 1037 decoder_frame_length_ >= samples_30_ms) { | 1041 decoder_frame_length_ >= samples_30_ms) { |
| 1038 // Avoid decoding more data as it might overflow the playout buffer. | 1042 // Avoid decoding more data as it might overflow the playout buffer. |
| 1039 *operation = kNormal; | 1043 *operation = kNormal; |
| 1040 return 0; | 1044 return 0; |
| 1041 } else if (samples_left < samples_20_ms && | 1045 } else if (samples_left < static_cast<int>(samples_20_ms) && |
| 1042 decoder_frame_length_ < samples_30_ms) { | 1046 decoder_frame_length_ < samples_30_ms) { |
| 1043 // Build up decoded data by decoding at least 20 ms of audio data. Do | 1047 // Build up decoded data by decoding at least 20 ms of audio data. Do |
| 1044 // not perform accelerate yet, but wait until we only need to do one | 1048 // not perform accelerate yet, but wait until we only need to do one |
| 1045 // decoding. | 1049 // decoding. |
| 1046 required_samples = 2 * output_size_samples_; | 1050 required_samples = 2 * output_size_samples_; |
| 1047 *operation = kNormal; | 1051 *operation = kNormal; |
| 1048 } | 1052 } |
| 1049 // If none of the above is true, we have one of two possible situations: | 1053 // If none of the above is true, we have one of two possible situations: |
| 1050 // (1) 20 ms <= samples_left < 30 ms and decoder_frame_length_ < 30 ms; or | 1054 // (1) 20 ms <= samples_left < 30 ms and decoder_frame_length_ < 30 ms; or |
| 1051 // (2) samples_left < 10 ms and decoder_frame_length_ >= 30 ms. | 1055 // (2) samples_left < 10 ms and decoder_frame_length_ >= 30 ms. |
| 1052 // In either case, we move on with the accelerate decision, and decode one | 1056 // In either case, we move on with the accelerate decision, and decode one |
| 1053 // frame now. | 1057 // frame now. |
| 1054 break; | 1058 break; |
| 1055 } | 1059 } |
| 1056 case kPreemptiveExpand: { | 1060 case kPreemptiveExpand: { |
| 1057 // In order to do a preemptive expand we need at least 30 ms of decoded | 1061 // In order to do a preemptive expand we need at least 30 ms of decoded |
| 1058 // audio data. | 1062 // audio data. |
| 1059 if ((samples_left >= samples_30_ms) || | 1063 if ((samples_left >= static_cast<int>(samples_30_ms)) || |
| 1060 (samples_left >= samples_10_ms && | 1064 (samples_left >= static_cast<int>(samples_10_ms) && |
| 1061 decoder_frame_length_ >= samples_30_ms)) { | 1065 decoder_frame_length_ >= samples_30_ms)) { |
| 1062 // Already have enough data, so we do not need to extract any more. | 1066 // Already have enough data, so we do not need to extract any more. |
| 1063 // Or, avoid decoding more data as it might overflow the playout buffer. | 1067 // Or, avoid decoding more data as it might overflow the playout buffer. |
| 1064 // Still try preemptive expand, though. | 1068 // Still try preemptive expand, though. |
| 1065 decision_logic_->set_sample_memory(samples_left); | 1069 decision_logic_->set_sample_memory(samples_left); |
| 1066 decision_logic_->set_prev_time_scale(true); | 1070 decision_logic_->set_prev_time_scale(true); |
| 1067 return 0; | 1071 return 0; |
| 1068 } | 1072 } |
| 1069 if (samples_left < samples_20_ms && | 1073 if (samples_left < static_cast<int>(samples_20_ms) && |
| 1070 decoder_frame_length_ < samples_30_ms) { | 1074 decoder_frame_length_ < samples_30_ms) { |
| 1071 // Build up decoded data by decoding at least 20 ms of audio data. | 1075 // Build up decoded data by decoding at least 20 ms of audio data. |
| 1072 // Still try to perform preemptive expand. | 1076 // Still try to perform preemptive expand. |
| 1073 required_samples = 2 * output_size_samples_; | 1077 required_samples = 2 * output_size_samples_; |
| 1074 } | 1078 } |
| 1075 // Move on with the preemptive expand decision. | 1079 // Move on with the preemptive expand decision. |
| 1076 break; | 1080 break; |
| 1077 } | 1081 } |
| 1078 case kMerge: { | 1082 case kMerge: { |
| 1079 required_samples = | 1083 required_samples = |
| (...skipping 36 matching lines...) |
| 1116 } | 1120 } |
| 1117 | 1121 |
| 1118 if (*operation == kAccelerate || *operation == kFastAccelerate || | 1122 if (*operation == kAccelerate || *operation == kFastAccelerate || |
| 1119 *operation == kPreemptiveExpand) { | 1123 *operation == kPreemptiveExpand) { |
| 1120 decision_logic_->set_sample_memory(samples_left + extracted_samples); | 1124 decision_logic_->set_sample_memory(samples_left + extracted_samples); |
| 1121 decision_logic_->set_prev_time_scale(true); | 1125 decision_logic_->set_prev_time_scale(true); |
| 1122 } | 1126 } |
| 1123 | 1127 |
| 1124 if (*operation == kAccelerate || *operation == kFastAccelerate) { | 1128 if (*operation == kAccelerate || *operation == kFastAccelerate) { |
| 1125 // Check that we have enough data (30ms) to do accelerate. | 1129 // Check that we have enough data (30ms) to do accelerate. |
| 1126 if (extracted_samples + samples_left < samples_30_ms) { | 1130 if (extracted_samples + samples_left < static_cast<int>(samples_30_ms)) { |
| 1127 // TODO(hlundin): Write test for this. | 1131 // TODO(hlundin): Write test for this. |
| 1128 // Not enough, do normal operation instead. | 1132 // Not enough, do normal operation instead. |
| 1129 *operation = kNormal; | 1133 *operation = kNormal; |
| 1130 } | 1134 } |
| 1131 } | 1135 } |
| 1132 | 1136 |
| 1133 timestamp_ = end_timestamp; | 1137 timestamp_ = end_timestamp; |
| 1134 return 0; | 1138 return 0; |
| 1135 } | 1139 } |
| 1136 | 1140 |
| (...skipping 130 matching lines...) |
| 1267 // Decode to silence with the same frame size as the last decode. | 1271 // Decode to silence with the same frame size as the last decode. |
| 1268 LOG(LS_VERBOSE) << "Decoding sync-packet: " << | 1272 LOG(LS_VERBOSE) << "Decoding sync-packet: " << |
| 1269 " ts=" << packet->header.timestamp << | 1273 " ts=" << packet->header.timestamp << |
| 1270 ", sn=" << packet->header.sequenceNumber << | 1274 ", sn=" << packet->header.sequenceNumber << |
| 1271 ", pt=" << static_cast<int>(packet->header.payloadType) << | 1275 ", pt=" << static_cast<int>(packet->header.payloadType) << |
| 1272 ", ssrc=" << packet->header.ssrc << | 1276 ", ssrc=" << packet->header.ssrc << |
| 1273 ", len=" << packet->payload_length; | 1277 ", len=" << packet->payload_length; |
| 1274 memset(&decoded_buffer_[*decoded_length], 0, | 1278 memset(&decoded_buffer_[*decoded_length], 0, |
| 1275 decoder_frame_length_ * decoder->Channels() * | 1279 decoder_frame_length_ * decoder->Channels() * |
| 1276 sizeof(decoded_buffer_[0])); | 1280 sizeof(decoded_buffer_[0])); |
| 1277 decode_length = decoder_frame_length_; | 1281 decode_length = rtc::checked_cast<int>(decoder_frame_length_); |
| 1278 } else if (!packet->primary) { | 1282 } else if (!packet->primary) { |
| 1279 // This is a redundant payload; call the special decoder method. | 1283 // This is a redundant payload; call the special decoder method. |
| 1280 LOG(LS_VERBOSE) << "Decoding packet (redundant):" << | 1284 LOG(LS_VERBOSE) << "Decoding packet (redundant):" << |
| 1281 " ts=" << packet->header.timestamp << | 1285 " ts=" << packet->header.timestamp << |
| 1282 ", sn=" << packet->header.sequenceNumber << | 1286 ", sn=" << packet->header.sequenceNumber << |
| 1283 ", pt=" << static_cast<int>(packet->header.payloadType) << | 1287 ", pt=" << static_cast<int>(packet->header.payloadType) << |
| 1284 ", ssrc=" << packet->header.ssrc << | 1288 ", ssrc=" << packet->header.ssrc << |
| 1285 ", len=" << packet->payload_length; | 1289 ", len=" << packet->payload_length; |
| 1286 decode_length = decoder->DecodeRedundant( | 1290 decode_length = decoder->DecodeRedundant( |
| 1287 packet->payload, packet->payload_length, fs_hz_, | 1291 packet->payload, packet->payload_length, fs_hz_, |
| (...skipping 12 matching lines...) |
| 1300 &decoded_buffer_[*decoded_length], speech_type); | 1304 &decoded_buffer_[*decoded_length], speech_type); |
| 1301 } | 1305 } |
| 1302 | 1306 |
| 1303 delete[] packet->payload; | 1307 delete[] packet->payload; |
| 1304 delete packet; | 1308 delete packet; |
| 1305 packet = NULL; | 1309 packet = NULL; |
| 1306 if (decode_length > 0) { | 1310 if (decode_length > 0) { |
| 1307 *decoded_length += decode_length; | 1311 *decoded_length += decode_length; |
| 1308 // Update |decoder_frame_length_| with number of samples per channel. | 1312 // Update |decoder_frame_length_| with number of samples per channel. |
| 1309 decoder_frame_length_ = | 1313 decoder_frame_length_ = |
| 1310 decode_length / static_cast<int>(decoder->Channels()); | 1314 static_cast<size_t>(decode_length) / decoder->Channels(); |
| 1311 LOG(LS_VERBOSE) << "Decoded " << decode_length << " samples (" | 1315 LOG(LS_VERBOSE) << "Decoded " << decode_length << " samples (" |
| 1312 << decoder->Channels() << " channel(s) -> " | 1316 << decoder->Channels() << " channel(s) -> " |
| 1313 << decoder_frame_length_ << " samples per channel)"; | 1317 << decoder_frame_length_ << " samples per channel)"; |
| 1314 } else if (decode_length < 0) { | 1318 } else if (decode_length < 0) { |
| 1315 // Error. | 1319 // Error. |
| 1316 LOG(LS_WARNING) << "Decode " << decode_length << " " << payload_length; | 1320 LOG(LS_WARNING) << "Decode " << decode_length << " " << payload_length; |
| 1317 *decoded_length = -1; | 1321 *decoded_length = -1; |
| 1318 PacketBuffer::DeleteAllPackets(packet_list); | 1322 PacketBuffer::DeleteAllPackets(packet_list); |
| 1319 break; | 1323 break; |
| 1320 } | 1324 } |
| (...skipping 38 matching lines...) |
| 1359 | 1363 |
| 1360 if (!play_dtmf) { | 1364 if (!play_dtmf) { |
| 1361 dtmf_tone_generator_->Reset(); | 1365 dtmf_tone_generator_->Reset(); |
| 1362 } | 1366 } |
| 1363 } | 1367 } |
| 1364 | 1368 |
| 1365 void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length, | 1369 void NetEqImpl::DoMerge(int16_t* decoded_buffer, size_t decoded_length, |
| 1366 AudioDecoder::SpeechType speech_type, bool play_dtmf) { | 1370 AudioDecoder::SpeechType speech_type, bool play_dtmf) { |
| 1367 assert(mute_factor_array_.get()); | 1371 assert(mute_factor_array_.get()); |
| 1368 assert(merge_.get()); | 1372 assert(merge_.get()); |
| 1369 int new_length = merge_->Process(decoded_buffer, decoded_length, | 1373 size_t new_length = merge_->Process(decoded_buffer, decoded_length, |
| 1370 mute_factor_array_.get(), | 1374 mute_factor_array_.get(), |
| 1371 algorithm_buffer_.get()); | 1375 algorithm_buffer_.get()); |
| 1372 int expand_length_correction = new_length - | 1376 size_t expand_length_correction = new_length - |
| 1373 static_cast<int>(decoded_length / algorithm_buffer_->Channels()); | 1377 decoded_length / algorithm_buffer_->Channels(); |
| 1374 | 1378 |
| 1375 // Update in-call and post-call statistics. | 1379 // Update in-call and post-call statistics. |
| 1376 if (expand_->MuteFactor(0) == 0) { | 1380 if (expand_->MuteFactor(0) == 0) { |
| 1377 // Expand generates only noise. | 1381 // Expand generates only noise. |
| 1378 stats_.ExpandedNoiseSamples(expand_length_correction); | 1382 stats_.ExpandedNoiseSamples(expand_length_correction); |
| 1379 } else { | 1383 } else { |
| 1380 // Expansion generates more than only noise. | 1384 // Expansion generates more than only noise. |
| 1381 stats_.ExpandedVoiceSamples(expand_length_correction); | 1385 stats_.ExpandedVoiceSamples(expand_length_correction); |
| 1382 } | 1386 } |
| 1383 | 1387 |
| 1384 last_mode_ = kModeMerge; | 1388 last_mode_ = kModeMerge; |
| 1385 // If last packet was decoded as an inband CNG, set mode to CNG instead. | 1389 // If last packet was decoded as an inband CNG, set mode to CNG instead. |
| 1386 if (speech_type == AudioDecoder::kComfortNoise) { | 1390 if (speech_type == AudioDecoder::kComfortNoise) { |
| 1387 last_mode_ = kModeCodecInternalCng; | 1391 last_mode_ = kModeCodecInternalCng; |
| 1388 } | 1392 } |
| 1389 expand_->Reset(); | 1393 expand_->Reset(); |
| 1390 if (!play_dtmf) { | 1394 if (!play_dtmf) { |
| 1391 dtmf_tone_generator_->Reset(); | 1395 dtmf_tone_generator_->Reset(); |
| 1392 } | 1396 } |
| 1393 } | 1397 } |
| 1394 | 1398 |
| 1395 int NetEqImpl::DoExpand(bool play_dtmf) { | 1399 int NetEqImpl::DoExpand(bool play_dtmf) { |
| 1396 while ((sync_buffer_->FutureLength() - expand_->overlap_length()) < | 1400 while ((sync_buffer_->FutureLength() - expand_->overlap_length()) < |
| 1397 static_cast<size_t>(output_size_samples_)) { | 1401 output_size_samples_) { |
| 1398 algorithm_buffer_->Clear(); | 1402 algorithm_buffer_->Clear(); |
| 1399 int return_value = expand_->Process(algorithm_buffer_.get()); | 1403 int return_value = expand_->Process(algorithm_buffer_.get()); |
| 1400 int length = static_cast<int>(algorithm_buffer_->Size()); | 1404 size_t length = algorithm_buffer_->Size(); |
| 1401 | 1405 |
| 1402 // Update in-call and post-call statistics. | 1406 // Update in-call and post-call statistics. |
| 1403 if (expand_->MuteFactor(0) == 0) { | 1407 if (expand_->MuteFactor(0) == 0) { |
| 1404 // Expand operation generates only noise. | 1408 // Expand operation generates only noise. |
| 1405 stats_.ExpandedNoiseSamples(length); | 1409 stats_.ExpandedNoiseSamples(length); |
| 1406 } else { | 1410 } else { |
| 1407 // Expand operation generates more than only noise. | 1411 // Expand operation generates more than only noise. |
| 1408 stats_.ExpandedVoiceSamples(length); | 1412 stats_.ExpandedVoiceSamples(length); |
| 1409 } | 1413 } |
| 1410 | 1414 |
| (...skipping 10 matching lines...) |
| 1421 dtmf_tone_generator_->Reset(); | 1425 dtmf_tone_generator_->Reset(); |
| 1422 } | 1426 } |
| 1423 return 0; | 1427 return 0; |
| 1424 } | 1428 } |
| 1425 | 1429 |
| 1426 int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, | 1430 int NetEqImpl::DoAccelerate(int16_t* decoded_buffer, |
| 1427 size_t decoded_length, | 1431 size_t decoded_length, |
| 1428 AudioDecoder::SpeechType speech_type, | 1432 AudioDecoder::SpeechType speech_type, |
| 1429 bool play_dtmf, | 1433 bool play_dtmf, |
| 1430 bool fast_accelerate) { | 1434 bool fast_accelerate) { |
| 1431 const size_t required_samples = 240 * fs_mult_; // Must have 30 ms. | 1435 const size_t required_samples = |
| | 1436 static_cast<size_t>(240 * fs_mult_); // Must have 30 ms. |
| 1432 size_t borrowed_samples_per_channel = 0; | 1437 size_t borrowed_samples_per_channel = 0; |
| 1433 size_t num_channels = algorithm_buffer_->Channels(); | 1438 size_t num_channels = algorithm_buffer_->Channels(); |
| 1434 size_t decoded_length_per_channel = decoded_length / num_channels; | 1439 size_t decoded_length_per_channel = decoded_length / num_channels; |
| 1435 if (decoded_length_per_channel < required_samples) { | 1440 if (decoded_length_per_channel < required_samples) { |
| 1436 // Must move data from the |sync_buffer_| in order to get 30 ms. | 1441 // Must move data from the |sync_buffer_| in order to get 30 ms. |
| 1437 borrowed_samples_per_channel = static_cast<int>(required_samples - | 1442 borrowed_samples_per_channel = static_cast<int>(required_samples - |
| 1438 decoded_length_per_channel); | 1443 decoded_length_per_channel); |
| 1439 memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels], | 1444 memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels], |
| 1440 decoded_buffer, | 1445 decoded_buffer, |
| 1441 sizeof(int16_t) * decoded_length); | 1446 sizeof(int16_t) * decoded_length); |
| 1442 sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel, | 1447 sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel, |
| 1443 decoded_buffer); | 1448 decoded_buffer); |
| 1444 decoded_length = required_samples * num_channels; | 1449 decoded_length = required_samples * num_channels; |
| 1445 } | 1450 } |
| 1446 | 1451 |
| 1447 int16_t samples_removed; | 1452 size_t samples_removed; |
| 1448 Accelerate::ReturnCodes return_code = | 1453 Accelerate::ReturnCodes return_code = |
| 1449 accelerate_->Process(decoded_buffer, decoded_length, fast_accelerate, | 1454 accelerate_->Process(decoded_buffer, decoded_length, fast_accelerate, |
| 1450 algorithm_buffer_.get(), &samples_removed); | 1455 algorithm_buffer_.get(), &samples_removed); |
| 1451 stats_.AcceleratedSamples(samples_removed); | 1456 stats_.AcceleratedSamples(samples_removed); |
| 1452 switch (return_code) { | 1457 switch (return_code) { |
| 1453 case Accelerate::kSuccess: | 1458 case Accelerate::kSuccess: |
| 1454 last_mode_ = kModeAccelerateSuccess; | 1459 last_mode_ = kModeAccelerateSuccess; |
| 1455 break; | 1460 break; |
| 1456 case Accelerate::kSuccessLowEnergy: | 1461 case Accelerate::kSuccessLowEnergy: |
| 1457 last_mode_ = kModeAccelerateLowEnergy; | 1462 last_mode_ = kModeAccelerateLowEnergy; |
| (...skipping 36 matching lines...) |
| 1494 dtmf_tone_generator_->Reset(); | 1499 dtmf_tone_generator_->Reset(); |
| 1495 } | 1500 } |
| 1496 expand_->Reset(); | 1501 expand_->Reset(); |
| 1497 return 0; | 1502 return 0; |
| 1498 } | 1503 } |
| 1499 | 1504 |
| 1500 int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer, | 1505 int NetEqImpl::DoPreemptiveExpand(int16_t* decoded_buffer, |
| 1501 size_t decoded_length, | 1506 size_t decoded_length, |
| 1502 AudioDecoder::SpeechType speech_type, | 1507 AudioDecoder::SpeechType speech_type, |
| 1503 bool play_dtmf) { | 1508 bool play_dtmf) { |
| 1504 const size_t required_samples = 240 * fs_mult_; // Must have 30 ms. | 1509 const size_t required_samples = |
| | 1510 static_cast<size_t>(240 * fs_mult_); // Must have 30 ms. |
| 1505 size_t num_channels = algorithm_buffer_->Channels(); | 1511 size_t num_channels = algorithm_buffer_->Channels(); |
| 1506 int borrowed_samples_per_channel = 0; | 1512 size_t borrowed_samples_per_channel = 0; |
| 1507 int old_borrowed_samples_per_channel = 0; | 1513 size_t old_borrowed_samples_per_channel = 0; |
| 1508 size_t decoded_length_per_channel = decoded_length / num_channels; | 1514 size_t decoded_length_per_channel = decoded_length / num_channels; |
| 1509 if (decoded_length_per_channel < required_samples) { | 1515 if (decoded_length_per_channel < required_samples) { |
| 1510 // Must move data from the |sync_buffer_| in order to get 30 ms. | 1516 // Must move data from the |sync_buffer_| in order to get 30 ms. |
| 1511 borrowed_samples_per_channel = static_cast<int>(required_samples - | 1517 borrowed_samples_per_channel = |
| 1512 decoded_length_per_channel); | 1518 required_samples - decoded_length_per_channel; |
| 1513 // Calculate how many of these were already played out. | 1519 // Calculate how many of these were already played out. |
| 1514 const int future_length = static_cast<int>(sync_buffer_->FutureLength()); | |
| 1515 old_borrowed_samples_per_channel = | 1520 old_borrowed_samples_per_channel = |
| 1516 (borrowed_samples_per_channel > future_length) ? | 1521 (borrowed_samples_per_channel > sync_buffer_->FutureLength()) ? |
| 1517 (borrowed_samples_per_channel - future_length) : 0; | 1522 (borrowed_samples_per_channel - sync_buffer_->FutureLength()) : 0; |
| 1518 memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels], | 1523 memmove(&decoded_buffer[borrowed_samples_per_channel * num_channels], |
| 1519 decoded_buffer, | 1524 decoded_buffer, |
| 1520 sizeof(int16_t) * decoded_length); | 1525 sizeof(int16_t) * decoded_length); |
| 1521 sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel, | 1526 sync_buffer_->ReadInterleavedFromEnd(borrowed_samples_per_channel, |
| 1522 decoded_buffer); | 1527 decoded_buffer); |
| 1523 decoded_length = required_samples * num_channels; | 1528 decoded_length = required_samples * num_channels; |
| 1524 } | 1529 } |
| 1525 | 1530 |
| 1526 int16_t samples_added; | 1531 size_t samples_added; |
| 1527 PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process( | 1532 PreemptiveExpand::ReturnCodes return_code = preemptive_expand_->Process( |
| 1528 decoded_buffer, static_cast<int>(decoded_length), | 1533 decoded_buffer, decoded_length, |
| 1529 old_borrowed_samples_per_channel, | 1534 old_borrowed_samples_per_channel, |
| 1530 algorithm_buffer_.get(), &samples_added); | 1535 algorithm_buffer_.get(), &samples_added); |
| 1531 stats_.PreemptiveExpandedSamples(samples_added); | 1536 stats_.PreemptiveExpandedSamples(samples_added); |
| 1532 switch (return_code) { | 1537 switch (return_code) { |
| 1533 case PreemptiveExpand::kSuccess: | 1538 case PreemptiveExpand::kSuccess: |
| 1534 last_mode_ = kModePreemptiveExpandSuccess; | 1539 last_mode_ = kModePreemptiveExpandSuccess; |
| 1535 break; | 1540 break; |
| 1536 case PreemptiveExpand::kSuccessLowEnergy: | 1541 case PreemptiveExpand::kSuccessLowEnergy: |
| 1537 last_mode_ = kModePreemptiveExpandLowEnergy; | 1542 last_mode_ = kModePreemptiveExpandLowEnergy; |
| 1538 break; | 1543 break; |
| (...skipping 173 matching lines...) |
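
One subtlety in the `DoPreemptiveExpand` hunk above: once `borrowed_samples_per_channel` becomes `size_t`, the old signed subtraction for `old_borrowed_samples_per_channel` would wrap around instead of going negative, so the ternary guard against `sync_buffer_->FutureLength()` is what keeps the arithmetic safe. A stand-alone sketch of that guard, with illustrative names:

```cpp
#include <cstddef>

// Unsigned-safe version of "how many borrowed samples were already played
// out": with size_t operands, a plain subtraction would underflow whenever
// fewer samples were borrowed than remain in the sync buffer's future part.
size_t OldBorrowedSamples(size_t borrowed_per_channel, size_t future_length) {
  return (borrowed_per_channel > future_length)
             ? (borrowed_per_channel - future_length)
             : 0;
}
```
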
| 1712 expand_->Reset(); | 1717 expand_->Reset(); |
| 1713 last_mode_ = kModeDtmf; | 1718 last_mode_ = kModeDtmf; |
| 1714 | 1719 |
| 1715 // Set to false because the DTMF is already in the algorithm buffer. | 1720 // Set to false because the DTMF is already in the algorithm buffer. |
| 1716 *play_dtmf = false; | 1721 *play_dtmf = false; |
| 1717 return 0; | 1722 return 0; |
| 1718 } | 1723 } |
| 1719 | 1724 |
| 1720 void NetEqImpl::DoAlternativePlc(bool increase_timestamp) { | 1725 void NetEqImpl::DoAlternativePlc(bool increase_timestamp) { |
| 1721 AudioDecoder* decoder = decoder_database_->GetActiveDecoder(); | 1726 AudioDecoder* decoder = decoder_database_->GetActiveDecoder(); |
| 1722 int length; | 1727 size_t length; |
| 1723 if (decoder && decoder->HasDecodePlc()) { | 1728 if (decoder && decoder->HasDecodePlc()) { |
| 1724 // Use the decoder's packet-loss concealment. | 1729 // Use the decoder's packet-loss concealment. |
| 1725 // TODO(hlundin): Will probably need a longer buffer for multi-channel. | 1730 // TODO(hlundin): Will probably need a longer buffer for multi-channel. |
| 1726 int16_t decoded_buffer[kMaxFrameSize]; | 1731 int16_t decoded_buffer[kMaxFrameSize]; |
| 1727 length = decoder->DecodePlc(1, decoded_buffer); | 1732 length = decoder->DecodePlc(1, decoded_buffer); |
| 1728 if (length > 0) { | 1733 if (length > 0) |
| 1729 algorithm_buffer_->PushBackInterleaved(decoded_buffer, length); | 1734 algorithm_buffer_->PushBackInterleaved(decoded_buffer, length); |
| 1730 } else { | |
| 1731 length = 0; | |
| 1732 } | |
| 1733 } else { | 1735 } else { |
| 1734 // Do simple zero-stuffing. | 1736 // Do simple zero-stuffing. |
| 1735 length = output_size_samples_; | 1737 length = output_size_samples_; |
| 1736 algorithm_buffer_->Zeros(length); | 1738 algorithm_buffer_->Zeros(length); |
| 1737 // By not advancing the timestamp, NetEq inserts samples. | 1739 // By not advancing the timestamp, NetEq inserts samples. |
| 1738 stats_.AddZeros(length); | 1740 stats_.AddZeros(length); |
| 1739 } | 1741 } |
| 1740 if (increase_timestamp) { | 1742 if (increase_timestamp) { |
| 1741 sync_buffer_->IncreaseEndTimestamp(static_cast<uint32_t>(length)); | 1743 sync_buffer_->IncreaseEndTimestamp(static_cast<uint32_t>(length)); |
| 1742 } | 1744 } |
| 1743 expand_->Reset(); | 1745 expand_->Reset(); |
| 1744 } | 1746 } |
| 1745 | 1747 |
| 1746 int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels, | 1748 int NetEqImpl::DtmfOverdub(const DtmfEvent& dtmf_event, size_t num_channels, |
| 1747 int16_t* output) const { | 1749 int16_t* output) const { |
| 1748 size_t out_index = 0; | 1750 size_t out_index = 0; |
| 1749 int overdub_length = output_size_samples_; // Default value. | 1751 size_t overdub_length = output_size_samples_; // Default value. |
| 1750 | 1752 |
| 1751 if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) { | 1753 if (sync_buffer_->dtmf_index() > sync_buffer_->next_index()) { |
| 1752 // Special operation for transition from "DTMF only" to "DTMF overdub". | 1754 // Special operation for transition from "DTMF only" to "DTMF overdub". |
| 1753 out_index = std::min( | 1755 out_index = std::min( |
| 1754 sync_buffer_->dtmf_index() - sync_buffer_->next_index(), | 1756 sync_buffer_->dtmf_index() - sync_buffer_->next_index(), |
| 1755 static_cast<size_t>(output_size_samples_)); | 1757 output_size_samples_); |
| 1756 overdub_length = output_size_samples_ - static_cast<int>(out_index); | 1758 overdub_length = output_size_samples_ - out_index; |
| 1757 } | 1759 } |
| 1758 | 1760 |
| 1759 AudioMultiVector dtmf_output(num_channels); | 1761 AudioMultiVector dtmf_output(num_channels); |
| 1760 int dtmf_return_value = 0; | 1762 int dtmf_return_value = 0; |
| 1761 if (!dtmf_tone_generator_->initialized()) { | 1763 if (!dtmf_tone_generator_->initialized()) { |
| 1762 dtmf_return_value = dtmf_tone_generator_->Init(fs_hz_, dtmf_event.event_no, | 1764 dtmf_return_value = dtmf_tone_generator_->Init(fs_hz_, dtmf_event.event_no, |
| 1763 dtmf_event.volume); | 1765 dtmf_event.volume); |
| 1764 } | 1766 } |
| 1765 if (dtmf_return_value == 0) { | 1767 if (dtmf_return_value == 0) { |
| 1766 dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length, | 1768 dtmf_return_value = dtmf_tone_generator_->Generate(overdub_length, |
| 1767 &dtmf_output); | 1769 &dtmf_output); |
| 1768 assert((size_t) overdub_length == dtmf_output.Size()); | 1770 assert(overdub_length == dtmf_output.Size()); |
| 1769 } | 1771 } |
| 1770 dtmf_output.ReadInterleaved(overdub_length, &output[out_index]); | 1772 dtmf_output.ReadInterleaved(overdub_length, &output[out_index]); |
| 1771 return dtmf_return_value < 0 ? dtmf_return_value : 0; | 1773 return dtmf_return_value < 0 ? dtmf_return_value : 0; |
| 1772 } | 1774 } |
| 1773 | 1775 |
| 1774 int NetEqImpl::ExtractPackets(int required_samples, PacketList* packet_list) { | 1776 int NetEqImpl::ExtractPackets(size_t required_samples, |
| | 1777 PacketList* packet_list) { |
| 1775 bool first_packet = true; | 1778 bool first_packet = true; |
| 1776 uint8_t prev_payload_type = 0; | 1779 uint8_t prev_payload_type = 0; |
| 1777 uint32_t prev_timestamp = 0; | 1780 uint32_t prev_timestamp = 0; |
| 1778 uint16_t prev_sequence_number = 0; | 1781 uint16_t prev_sequence_number = 0; |
| 1779 bool next_packet_available = false; | 1782 bool next_packet_available = false; |
| 1780 | 1783 |
| 1781 const RTPHeader* header = packet_buffer_->NextRtpHeader(); | 1784 const RTPHeader* header = packet_buffer_->NextRtpHeader(); |
| 1782 assert(header); | 1785 assert(header); |
| 1783 if (!header) { | 1786 if (!header) { |
| 1784 LOG(LS_ERROR) << "Packet buffer unexpectedly empty."; | 1787 LOG(LS_ERROR) << "Packet buffer unexpectedly empty."; |
| 1785 return -1; | 1788 return -1; |
| 1786 } | 1789 } |
| 1787 uint32_t first_timestamp = header->timestamp; | 1790 uint32_t first_timestamp = header->timestamp; |
| 1788 int extracted_samples = 0; | 1791 int extracted_samples = 0; |
| 1789 | 1792 |
| 1790 // Packet extraction loop. | 1793 // Packet extraction loop. |
| 1791 do { | 1794 do { |
| 1792 timestamp_ = header->timestamp; | 1795 timestamp_ = header->timestamp; |
| 1793 int discard_count = 0; | 1796 size_t discard_count = 0; |
| 1794 Packet* packet = packet_buffer_->GetNextPacket(&discard_count); | 1797 Packet* packet = packet_buffer_->GetNextPacket(&discard_count); |
| 1795 // |header| may be invalid after the |packet_buffer_| operation. | 1798 // |header| may be invalid after the |packet_buffer_| operation. |
| 1796 header = NULL; | 1799 header = NULL; |
| 1797 if (!packet) { | 1800 if (!packet) { |
| 1798 LOG(LS_ERROR) << "Should always be able to extract a packet here"; | 1801 LOG(LS_ERROR) << "Should always be able to extract a packet here"; |
| 1799 assert(false); // Should always be able to extract a packet here. | 1802 assert(false); // Should always be able to extract a packet here. |
| 1800 return -1; | 1803 return -1; |
| 1801 } | 1804 } |
| 1802 stats_.PacketsDiscarded(discard_count); | 1805 stats_.PacketsDiscarded(discard_count); |
| 1803 // Store waiting time in ms; packets->waiting_time is in "output blocks". | 1806 // Store waiting time in ms; packets->waiting_time is in "output blocks". |
| 1804 stats_.StoreWaitingTime(packet->waiting_time * kOutputSizeMs); | 1807 stats_.StoreWaitingTime(packet->waiting_time * kOutputSizeMs); |
| 1805 assert(packet->payload_length > 0); | 1808 assert(packet->payload_length > 0); |
| 1806 packet_list->push_back(packet); // Store packet in list. | 1809 packet_list->push_back(packet); // Store packet in list. |
| 1807 | 1810 |
| 1808 if (first_packet) { | 1811 if (first_packet) { |
| 1809 first_packet = false; | 1812 first_packet = false; |
| 1810 decoded_packet_sequence_number_ = prev_sequence_number = | 1813 decoded_packet_sequence_number_ = prev_sequence_number = |
| 1811 packet->header.sequenceNumber; | 1814 packet->header.sequenceNumber; |
| 1812 decoded_packet_timestamp_ = prev_timestamp = packet->header.timestamp; | 1815 decoded_packet_timestamp_ = prev_timestamp = packet->header.timestamp; |
| 1813 prev_payload_type = packet->header.payloadType; | 1816 prev_payload_type = packet->header.payloadType; |
| 1814 } | 1817 } |
| 1815 | 1818 |
| 1816 // Store number of extracted samples. | 1819 // Store number of extracted samples. |
| 1817 int packet_duration = 0; | 1820 int packet_duration = 0; |
| 1818 AudioDecoder* decoder = decoder_database_->GetDecoder( | 1821 AudioDecoder* decoder = decoder_database_->GetDecoder( |
| 1819 packet->header.payloadType); | 1822 packet->header.payloadType); |
| 1820 if (decoder) { | 1823 if (decoder) { |
| 1821 if (packet->sync_packet) { | 1824 if (packet->sync_packet) { |
| 1822 packet_duration = decoder_frame_length_; | 1825 packet_duration = rtc::checked_cast<int>(decoder_frame_length_); |
| 1823 } else { | 1826 } else { |
| 1824 if (packet->primary) { | 1827 if (packet->primary) { |
| 1825 packet_duration = decoder->PacketDuration(packet->payload, | 1828 packet_duration = decoder->PacketDuration(packet->payload, |
| 1826 packet->payload_length); | 1829 packet->payload_length); |
| 1827 } else { | 1830 } else { |
| 1828 packet_duration = decoder-> | 1831 packet_duration = decoder-> |
| 1829 PacketDurationRedundant(packet->payload, packet->payload_length); | 1832 PacketDurationRedundant(packet->payload, packet->payload_length); |
| 1830 stats_.SecondaryDecodedSamples(packet_duration); | 1833 stats_.SecondaryDecodedSamples(packet_duration); |
| 1831 } | 1834 } |
| 1832 } | 1835 } |
| 1833 } else { | 1836 } else { |
| 1834 LOG(LS_WARNING) << "Unknown payload type " | 1837 LOG(LS_WARNING) << "Unknown payload type " |
| 1835 << static_cast<int>(packet->header.payloadType); | 1838 << static_cast<int>(packet->header.payloadType); |
| 1836 assert(false); | 1839 assert(false); |
| 1837 } | 1840 } |
| 1838 if (packet_duration <= 0) { | 1841 if (packet_duration <= 0) { |
| 1839 // Decoder did not return a packet duration. Assume that the packet | 1842 // Decoder did not return a packet duration. Assume that the packet |
| 1840 // contains the same number of samples as the previous one. | 1843 // contains the same number of samples as the previous one. |
| 1841 packet_duration = decoder_frame_length_; | 1844 packet_duration = rtc::checked_cast<int>(decoder_frame_length_); |
| 1842 } | 1845 } |
| 1843 extracted_samples = packet->header.timestamp - first_timestamp + | 1846 extracted_samples = packet->header.timestamp - first_timestamp + |
| 1844 packet_duration; | 1847 packet_duration; |
| 1845 | 1848 |
| 1846 // Check what packet is available next. | 1849 // Check what packet is available next. |
| 1847 header = packet_buffer_->NextRtpHeader(); | 1850 header = packet_buffer_->NextRtpHeader(); |
| 1848 next_packet_available = false; | 1851 next_packet_available = false; |
| 1849 if (header && prev_payload_type == header->payloadType) { | 1852 if (header && prev_payload_type == header->payloadType) { |
| 1850 int16_t seq_no_diff = header->sequenceNumber - prev_sequence_number; | 1853 int16_t seq_no_diff = header->sequenceNumber - prev_sequence_number; |
| 1851 int32_t ts_diff = header->timestamp - prev_timestamp; | 1854 size_t ts_diff = header->timestamp - prev_timestamp; |
| 1852 if (seq_no_diff == 1 || | 1855 if (seq_no_diff == 1 || |
| 1853 (seq_no_diff == 0 && ts_diff == decoder_frame_length_)) { | 1856 (seq_no_diff == 0 && ts_diff == decoder_frame_length_)) { |
| 1854 // The next sequence number is available, or the next part of a packet | 1857 // The next sequence number is available, or the next part of a packet |
| 1855 // that was split into pieces upon insertion. | 1858 // that was split into pieces upon insertion. |
| 1856 next_packet_available = true; | 1859 next_packet_available = true; |
| 1857 } | 1860 } |
| 1858 prev_sequence_number = header->sequenceNumber; | 1861 prev_sequence_number = header->sequenceNumber; |
| 1859 } | 1862 } |
| 1860 } while (extracted_samples < required_samples && next_packet_available); | 1863 } while (extracted_samples < rtc::checked_cast<int>(required_samples) && |
| 1864 next_packet_available); |
| 1861 | 1865 |
| 1862 if (extracted_samples > 0) { | 1866 if (extracted_samples > 0) { |
| 1863 // Delete old packets only when we are going to decode something. Otherwise, | 1867 // Delete old packets only when we are going to decode something. Otherwise, |
| 1864 // we could end up in the situation where we never decode anything, since | 1868 // we could end up in the situation where we never decode anything, since |
| 1865 // all incoming packets are considered too old but the buffer will also | 1869 // all incoming packets are considered too old but the buffer will also |
| 1866 // never be flooded and flushed. | 1870 // never be flooded and flushed. |
| 1867 packet_buffer_->DiscardAllOldPackets(timestamp_); | 1871 packet_buffer_->DiscardAllOldPackets(timestamp_); |
| 1868 } | 1872 } |
| 1869 | 1873 |
| 1870 return extracted_samples; | 1874 return extracted_samples; |
| 1871 } | 1875 } |
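The do-while loop above only keeps pulling packets while they continue the current decoding run: the next packet must have the same payload type and either a sequence number exactly one ahead, or the same sequence number with a timestamp advanced by one decoder frame (a packet that was split on insertion). Storing the difference of the two unsigned 16-bit sequence numbers in an int16_t makes that check robust across the wrap at 65535. A minimal standalone sketch (illustrative only, not part of the patch) of the wrap-around behaviour:

#include <cassert>
#include <cstdint>

int main() {
  // Hypothetical values around the 16-bit wrap: 0xFFFF is followed by 0x0000.
  uint16_t prev_sequence_number = 0xFFFF;
  uint16_t next_sequence_number = 0x0000;
  // The operands are promoted to int, so the raw difference is -65535;
  // narrowing it to int16_t reduces it modulo 2^16 (two's complement),
  // which yields the signed distance between the sequence numbers: +1.
  int16_t seq_no_diff =
      static_cast<int16_t>(next_sequence_number - prev_sequence_number);
  assert(seq_no_diff == 1);  // Still treated as consecutive packets.
  return 0;
}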
| 1872 | 1876 |
| 1873 void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) { | 1877 void NetEqImpl::UpdatePlcComponents(int fs_hz, size_t channels) { |
| 1874 // Delete objects and create new ones. | 1878 // Delete objects and create new ones. |
| 1875 expand_.reset(expand_factory_->Create(background_noise_.get(), | 1879 expand_.reset(expand_factory_->Create(background_noise_.get(), |
| 1876 sync_buffer_.get(), &random_vector_, | 1880 sync_buffer_.get(), &random_vector_, |
| 1877 &stats_, fs_hz, channels)); | 1881 &stats_, fs_hz, channels)); |
| 1878 merge_.reset(new Merge(fs_hz, channels, expand_.get(), sync_buffer_.get())); | 1882 merge_.reset(new Merge(fs_hz, channels, expand_.get(), sync_buffer_.get())); |
| 1879 } | 1883 } |
| 1880 | 1884 |
| 1881 void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) { | 1885 void NetEqImpl::SetSampleRateAndChannels(int fs_hz, size_t channels) { |
| 1882 LOG(LS_VERBOSE) << "SetSampleRateAndChannels " << fs_hz << " " << channels; | 1886 LOG(LS_VERBOSE) << "SetSampleRateAndChannels " << fs_hz << " " << channels; |
| 1883 // TODO(hlundin): Change to an enumerator and skip assert. | 1887 // TODO(hlundin): Change to an enumerator and skip assert. |
| 1884 assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000); | 1888 assert(fs_hz == 8000 || fs_hz == 16000 || fs_hz == 32000 || fs_hz == 48000); |
| 1885 assert(channels > 0); | 1889 assert(channels > 0); |
| 1886 | 1890 |
| 1887 fs_hz_ = fs_hz; | 1891 fs_hz_ = fs_hz; |
| 1888 fs_mult_ = fs_hz / 8000; | 1892 fs_mult_ = fs_hz / 8000; |
| 1889 output_size_samples_ = kOutputSizeMs * 8 * fs_mult_; | 1893 output_size_samples_ = static_cast<size_t>(kOutputSizeMs * 8 * fs_mult_); |
| 1890 decoder_frame_length_ = 3 * output_size_samples_; // Initialize to 30ms. | 1894 decoder_frame_length_ = 3 * output_size_samples_; // Initialize to 30ms. |
| 1891 | 1895 |
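The two assignments above fix the size of one output block and the initial decoder frame length. A worked example of the arithmetic (a sketch only; it assumes kOutputSizeMs is the 10 ms output block implied by the "Initialize to 30ms" comment):

#include <cassert>
#include <cstddef>

int main() {
  const int kOutputSizeMs = 10;  // One NetEq output block (assumed 10 ms).
  int fs_hz = 48000;
  int fs_mult = fs_hz / 8000;    // 6 at 48 kHz.
  size_t output_size_samples =
      static_cast<size_t>(kOutputSizeMs * 8 * fs_mult);   // 480 samples.
  size_t decoder_frame_length = 3 * output_size_samples;  // 1440 samples = 30 ms.
  assert(output_size_samples == 480);
  assert(decoder_frame_length == 1440);
  return 0;
}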
| 1892 last_mode_ = kModeNormal; | 1896 last_mode_ = kModeNormal; |
| 1893 | 1897 |
| 1894 // Create a new array of mute factors and set all to 1. | 1898 // Create a new array of mute factors and set all to 1. |
| 1895 mute_factor_array_.reset(new int16_t[channels]); | 1899 mute_factor_array_.reset(new int16_t[channels]); |
| 1896 for (size_t i = 0; i < channels; ++i) { | 1900 for (size_t i = 0; i < channels; ++i) { |
| 1897 mute_factor_array_[i] = 16384; // 1.0 in Q14. | 1901 mute_factor_array_[i] = 16384; // 1.0 in Q14. |
| 1898 } | 1902 } |
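The mute factors are stored in Q14 fixed point, so 16384 represents unity gain. A minimal sketch (illustrative only, not tied to NetEq's internal helpers) of how such a factor scales a sample: multiply in 32-bit precision, then shift right by 14 to drop the Q14 scaling.

#include <cassert>
#include <cstdint>

int main() {
  const int16_t kFullGainQ14 = 16384;  // 1.0 in Q14.
  const int16_t kHalfGainQ14 = 8192;   // 0.5 in Q14.
  int16_t sample = 1000;
  // int16_t operands are promoted to int, so the products fit comfortably.
  int16_t unchanged = static_cast<int16_t>((sample * kFullGainQ14) >> 14);
  int16_t halved = static_cast<int16_t>((sample * kHalfGainQ14) >> 14);
  assert(unchanged == 1000);
  assert(halved == 500);
  return 0;
}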
| 1899 | 1903 |
| (...skipping 24 matching lines...) |
| 1924 | 1928 |
| 1925 // Move index so that we create a small set of future samples (all 0). | 1929 // Move index so that we create a small set of future samples (all 0). |
| 1926 sync_buffer_->set_next_index(sync_buffer_->next_index() - | 1930 sync_buffer_->set_next_index(sync_buffer_->next_index() - |
| 1927 expand_->overlap_length()); | 1931 expand_->overlap_length()); |
| 1928 | 1932 |
| 1929 normal_.reset(new Normal(fs_hz, decoder_database_.get(), *background_noise_, | 1933 normal_.reset(new Normal(fs_hz, decoder_database_.get(), *background_noise_, |
| 1930 expand_.get())); | 1934 expand_.get())); |
| 1931 accelerate_.reset( | 1935 accelerate_.reset( |
| 1932 accelerate_factory_->Create(fs_hz, channels, *background_noise_)); | 1936 accelerate_factory_->Create(fs_hz, channels, *background_noise_)); |
| 1933 preemptive_expand_.reset(preemptive_expand_factory_->Create( | 1937 preemptive_expand_.reset(preemptive_expand_factory_->Create( |
| 1934 fs_hz, channels, | 1938 fs_hz, channels, *background_noise_, expand_->overlap_length())); |
| 1935 *background_noise_, | |
| 1936 static_cast<int>(expand_->overlap_length()))); | |
| 1937 | 1939 |
| 1938 // Delete ComfortNoise object and create a new one. | 1940 // Delete ComfortNoise object and create a new one. |
| 1939 comfort_noise_.reset(new ComfortNoise(fs_hz, decoder_database_.get(), | 1941 comfort_noise_.reset(new ComfortNoise(fs_hz, decoder_database_.get(), |
| 1940 sync_buffer_.get())); | 1942 sync_buffer_.get())); |
| 1941 | 1943 |
| 1942 // Verify that |decoded_buffer_| is long enough. | 1944 // Verify that |decoded_buffer_| is long enough. |
| 1943 if (decoded_buffer_length_ < kMaxFrameSize * channels) { | 1945 if (decoded_buffer_length_ < kMaxFrameSize * channels) { |
| 1944 // Reallocate to larger size. | 1946 // Reallocate to larger size. |
| 1945 decoded_buffer_length_ = kMaxFrameSize * channels; | 1947 decoded_buffer_length_ = kMaxFrameSize * channels; |
| 1946 decoded_buffer_.reset(new int16_t[decoded_buffer_length_]); | 1948 decoded_buffer_.reset(new int16_t[decoded_buffer_length_]); |
| (...skipping 26 matching lines...) |
| 1973 | 1975 |
| 1974 void NetEqImpl::CreateDecisionLogic() { | 1976 void NetEqImpl::CreateDecisionLogic() { |
| 1975 decision_logic_.reset(DecisionLogic::Create(fs_hz_, output_size_samples_, | 1977 decision_logic_.reset(DecisionLogic::Create(fs_hz_, output_size_samples_, |
| 1976 playout_mode_, | 1978 playout_mode_, |
| 1977 decoder_database_.get(), | 1979 decoder_database_.get(), |
| 1978 *packet_buffer_.get(), | 1980 *packet_buffer_.get(), |
| 1979 delay_manager_.get(), | 1981 delay_manager_.get(), |
| 1980 buffer_level_filter_.get())); | 1982 buffer_level_filter_.get())); |
| 1981 } | 1983 } |
| 1982 } // namespace webrtc | 1984 } // namespace webrtc |