| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 32 matching lines...) | |
| 43 return 0; | 43 return 0; |
| 44 } | 44 } |
| 45 output->PushBackInterleaved(input, length); | 45 output->PushBackInterleaved(input, length); |
| 46 int16_t* signal = &(*output)[0][0]; | 46 int16_t* signal = &(*output)[0][0]; |
| 47 | 47 |
| 48 const unsigned fs_mult = fs_hz_ / 8000; | 48 const unsigned fs_mult = fs_hz_ / 8000; |
| 49 assert(fs_mult > 0); | 49 assert(fs_mult > 0); |
| 50 // fs_shift = log2(fs_mult), rounded down. | 50 // fs_shift = log2(fs_mult), rounded down. |
| 51 // Note that |fs_shift| is not "exact" for 48 kHz. | 51 // Note that |fs_shift| is not "exact" for 48 kHz. |
| 52 // TODO(hlundin): Investigate this further. | 52 // TODO(hlundin): Investigate this further. |
| 53 const int fs_shift = 30 - WebRtcSpl_NormW32(fs_mult); | 53 const int fs_shift = 30 - WebRtcSpl_NormW32(static_cast<int32_t>(fs_mult)); |
| 54 | 54 |
| 55 // Check if last RecOut call resulted in an Expand. If so, we have to take | 55 // Check if last RecOut call resulted in an Expand. If so, we have to take |
| 56 // care of some cross-fading and unmuting. | 56 // care of some cross-fading and unmuting. |
| 57 if (last_mode == kModeExpand) { | 57 if (last_mode == kModeExpand) { |
| 58 // Generate interpolation data using Expand. | 58 // Generate interpolation data using Expand. |
| 59 // First, set Expand parameters to appropriate values. | 59 // First, set Expand parameters to appropriate values. |
| 60 expand_->SetParametersForNormalAfterExpand(); | 60 expand_->SetParametersForNormalAfterExpand(); |
| 61 | 61 |
| 62 // Call Expand. | 62 // Call Expand. |
| 63 AudioMultiVector expanded(output->Channels()); | 63 AudioMultiVector expanded(output->Channels()); |
| (...skipping 26 matching lines...) | |
| 90 } | 90 } |
| 91 | 91 |
| 92 int mute_factor; | 92 int mute_factor; |
| 93 if ((energy != 0) && | 93 if ((energy != 0) && |
| 94 (energy > background_noise_.Energy(channel_ix))) { | 94 (energy > background_noise_.Energy(channel_ix))) { |
| 95 // Normalize new frame energy to 15 bits. | 95 // Normalize new frame energy to 15 bits. |
| 96 scaling = WebRtcSpl_NormW32(energy) - 16; | 96 scaling = WebRtcSpl_NormW32(energy) - 16; |
| 97 // We want background_noise_.energy() / energy in Q14. | 97 // We want background_noise_.energy() / energy in Q14. |
| 98 int32_t bgn_energy = | 98 int32_t bgn_energy = |
| 99 background_noise_.Energy(channel_ix) << (scaling+14); | 99 background_noise_.Energy(channel_ix) << (scaling+14); |
| 100 int16_t energy_scaled = energy << scaling; | 100 int16_t energy_scaled = static_cast<int16_t>(energy << scaling); |
| 101 int16_t ratio = WebRtcSpl_DivW32W16(bgn_energy, energy_scaled); | 101 int32_t ratio = WebRtcSpl_DivW32W16(bgn_energy, energy_scaled); |
| 102 mute_factor = WebRtcSpl_SqrtFloor(static_cast<int32_t>(ratio) << 14); | 102 mute_factor = WebRtcSpl_SqrtFloor(ratio << 14); |
| 103 } else { | 103 } else { |
| 104 mute_factor = 16384; // 1.0 in Q14. | 104 mute_factor = 16384; // 1.0 in Q14. |
| 105 } | 105 } |
| 106 if (mute_factor > external_mute_factor_array[channel_ix]) { | 106 if (mute_factor > external_mute_factor_array[channel_ix]) { |
| 107 external_mute_factor_array[channel_ix] = std::min(mute_factor, 16384); | 107 external_mute_factor_array[channel_ix] = |
| | 108 static_cast<int16_t>(std::min(mute_factor, 16384)); |
| 108 } | 109 } |
| 109 | 110 |
| 110 // If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14). | 111 // If muted increase by 0.64 for every 20 ms (NB/WB 0.0040/0.0020 in Q14). |
| 111 int16_t increment = 64 / fs_mult; | 112 int16_t increment = 64 / fs_mult; |
| 112 for (size_t i = 0; i < length_per_channel; i++) { | 113 for (size_t i = 0; i < length_per_channel; i++) { |
| 113 // Scale with mute factor. | 114 // Scale with mute factor. |
| 114 assert(channel_ix < output->Channels()); | 115 assert(channel_ix < output->Channels()); |
| 115 assert(i < output->Size()); | 116 assert(i < output->Size()); |
| 116 int32_t scaled_signal = (*output)[channel_ix][i] * | 117 int32_t scaled_signal = (*output)[channel_ix][i] * |
| 117 external_mute_factor_array[channel_ix]; | 118 external_mute_factor_array[channel_ix]; |
| 118 // Shift 14 with proper rounding. | 119 // Shift 14 with proper rounding. |
| 119 (*output)[channel_ix][i] = (scaled_signal + 8192) >> 14; | 120 (*output)[channel_ix][i] = |
| | 121 static_cast<int16_t>((scaled_signal + 8192) >> 14); |
| 120 // Increase mute_factor towards 16384. | 122 // Increase mute_factor towards 16384. |
| 121 external_mute_factor_array[channel_ix] = | 123 external_mute_factor_array[channel_ix] = static_cast<int16_t>(std::min( |
| 122 std::min(external_mute_factor_array[channel_ix] + increment, 16384); | 124 external_mute_factor_array[channel_ix] + increment, 16384)); |
| 123 } | 125 } |
| 124 | 126 |
| 125 // Interpolate the expanded data into the new vector. | 127 // Interpolate the expanded data into the new vector. |
| 126 // (NB/WB/SWB32/SWB48 8/16/32/48 samples.) | 128 // (NB/WB/SWB32/SWB48 8/16/32/48 samples.) |
| 127 assert(fs_shift < 3); // Will always be 0, 1, or 2. | 129 assert(fs_shift < 3); // Will always be 0, 1, or 2. |
| 128 increment = 4 >> fs_shift; | 130 increment = 4 >> fs_shift; |
| 129 int fraction = increment; | 131 int fraction = increment; |
| 130 for (size_t i = 0; i < 8 * fs_mult; i++) { | 132 for (size_t i = 0; i < 8 * fs_mult; i++) { |
| 131 // TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8 | 133 // TODO(hlundin): Add 16 instead of 8 for correct rounding. Keeping 8 |
| 132 // now for legacy bit-exactness. | 134 // now for legacy bit-exactness. |
| 133 assert(channel_ix < output->Channels()); | 135 assert(channel_ix < output->Channels()); |
| 134 assert(i < output->Size()); | 136 assert(i < output->Size()); |
| 135 (*output)[channel_ix][i] = | 137 (*output)[channel_ix][i] = |
| 136 (fraction * (*output)[channel_ix][i] + | 138 static_cast<int16_t>((fraction * (*output)[channel_ix][i] + |
| 137 (32 - fraction) * expanded[channel_ix][i] + 8) >> 5; | 139 (32 - fraction) * expanded[channel_ix][i] + 8) >> 5); |
| 138 fraction += increment; | 140 fraction += increment; |
| 139 } | 141 } |
| 140 } | 142 } |
| 141 } else if (last_mode == kModeRfc3389Cng) { | 143 } else if (last_mode == kModeRfc3389Cng) { |
| 142 assert(output->Channels() == 1); // Not adapted for multi-channel yet. | 144 assert(output->Channels() == 1); // Not adapted for multi-channel yet. |
| 143 static const int kCngLength = 32; | 145 static const int kCngLength = 32; |
| 144 int16_t cng_output[kCngLength]; | 146 int16_t cng_output[kCngLength]; |
| 145 // Reset mute factor and start up fresh. | 147 // Reset mute factor and start up fresh. |
| 146 external_mute_factor_array[0] = 16384; | 148 external_mute_factor_array[0] = 16384; |
| 147 AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder(); | 149 AudioDecoder* cng_decoder = decoder_database_->GetActiveCngDecoder(); |
| (...skipping 30 matching lines...) | |
| 178 size_t length_per_channel = length / output->Channels(); | 180 size_t length_per_channel = length / output->Channels(); |
| 179 for (size_t i = 0; i < length_per_channel; i++) { | 181 for (size_t i = 0; i < length_per_channel; i++) { |
| 180 for (size_t channel_ix = 0; channel_ix < output->Channels(); | 182 for (size_t channel_ix = 0; channel_ix < output->Channels(); |
| 181 ++channel_ix) { | 183 ++channel_ix) { |
| 182 // Scale with mute factor. | 184 // Scale with mute factor. |
| 183 assert(channel_ix < output->Channels()); | 185 assert(channel_ix < output->Channels()); |
| 184 assert(i < output->Size()); | 186 assert(i < output->Size()); |
| 185 int32_t scaled_signal = (*output)[channel_ix][i] * | 187 int32_t scaled_signal = (*output)[channel_ix][i] * |
| 186 external_mute_factor_array[channel_ix]; | 188 external_mute_factor_array[channel_ix]; |
| 187 // Shift 14 with proper rounding. | 189 // Shift 14 with proper rounding. |
| 188 (*output)[channel_ix][i] = (scaled_signal + 8192) >> 14; | 190 (*output)[channel_ix][i] = |
| | 191 static_cast<int16_t>((scaled_signal + 8192) >> 14); |
| 189 // Increase mute_factor towards 16384. | 192 // Increase mute_factor towards 16384. |
| 190 external_mute_factor_array[channel_ix] = | 193 external_mute_factor_array[channel_ix] = static_cast<int16_t>(std::min( |
| 191 std::min(16384, external_mute_factor_array[channel_ix] + increment); | 194 16384, external_mute_factor_array[channel_ix] + increment)); |
| 192 } | 195 } |
| 193 } | 196 } |
| 194 } | 197 } |
| 195 | 198 |
| 196 return static_cast<int>(length); | 199 return static_cast<int>(length); |
| 197 } | 200 } |
| 198 | 201 |
| 199 } // namespace webrtc | 202 } // namespace webrtc |
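
A note on the unchanged context around new line 53: `WebRtcSpl_NormW32(x)` returns the number of left shifts needed to normalize a positive 32-bit value, so `30 - WebRtcSpl_NormW32(fs_mult)` is `floor(log2(fs_mult))`. A minimal plain-C++ sketch of that relation (the helper name `FsShiftFor` is illustrative, not part of normal.cc):

```cpp
#include <cassert>
#include <cstdint>

// floor(log2(fs_mult)) for fs_mult > 0, i.e. the same value that
// 30 - WebRtcSpl_NormW32(fs_mult) produces in the diff above.
int FsShiftFor(uint32_t fs_mult) {
  assert(fs_mult > 0);
  int shift = 0;
  while (fs_mult >>= 1) {
    ++shift;
  }
  return shift;
}
```

For `fs_mult == 6` (48 kHz) this yields 2, which is why the comment notes that `|fs_shift|` is not "exact" at that rate.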
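
The casts introduced in several places (e.g. new lines 107-108, 120-124, and 190-194) all make the same Q14 narrowing explicit. Below is a standalone sketch of that pattern, assuming a free function rather than the member code in `Normal::Process()`; the name `ApplyAndRampMuteFactor` is hypothetical and only meant to show the arithmetic:

```cpp
#include <algorithm>
#include <cstdint>
#include <vector>

// Applies a Q14 gain (16384 == 1.0) to a block of samples and ramps the
// gain back up towards unity, mirroring the mute-factor loops above.
void ApplyAndRampMuteFactor(std::vector<int16_t>* samples,
                            int16_t* mute_factor,  // Q14, in/out.
                            int16_t increment) {   // Q14 step per sample.
  for (int16_t& sample : *samples) {
    // Multiply in 32 bits so the Q14 product cannot overflow 16 bits.
    int32_t scaled = sample * (*mute_factor);
    // Shift down 14 bits with rounding (+8192 == 0.5 in Q14), then narrow.
    sample = static_cast<int16_t>((scaled + 8192) >> 14);
    // Move the gain towards 16384 (1.0 in Q14) without overshooting.
    *mute_factor = static_cast<int16_t>(
        std::min<int32_t>(*mute_factor + increment, 16384));
  }
}
```

Keeping the multiply in 32 bits means a full-scale sample times a gain of 16384 cannot overflow, and adding 8192 before the shift rounds to nearest instead of truncating; the explicit `static_cast<int16_t>` at the end is exactly the narrowing this CL spells out.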