OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 221 matching lines...)
232 voiced_vector + temp_lenght, parameters.current_voice_mix_factor, | 232 voiced_vector + temp_lenght, parameters.current_voice_mix_factor, |
233 unvoiced_vector + temp_lenght, temp_scale, 14, | 233 unvoiced_vector + temp_lenght, temp_scale, 14, |
234 temp_data + temp_lenght, static_cast<int>(current_lag - temp_lenght)); | 234 temp_data + temp_lenght, static_cast<int>(current_lag - temp_lenght)); |
235 } | 235 } |
236 | 236 |
237 // Select muting slope depending on how many consecutive expands we have | 237 // Select muting slope depending on how many consecutive expands we have |
238 // done. | 238 // done. |
239 if (consecutive_expands_ == 3) { | 239 if (consecutive_expands_ == 3) { |
240 // Let the mute factor decrease from 1.0 to 0.95 in 6.25 ms. | 240 // Let the mute factor decrease from 1.0 to 0.95 in 6.25 ms. |
241 // mute_slope = 0.0010 / fs_mult in Q20. | 241 // mute_slope = 0.0010 / fs_mult in Q20. |
242 parameters.mute_slope = std::max(parameters.mute_slope, 1049 / fs_mult); | 242 parameters.mute_slope = std::max(parameters.mute_slope, |
| 243 static_cast<int16_t>(1049 / fs_mult)); |
243 } | 244 } |
244 if (consecutive_expands_ == 7) { | 245 if (consecutive_expands_ == 7) { |
245 // Let the mute factor decrease from 1.0 to 0.90 in 6.25 ms. | 246 // Let the mute factor decrease from 1.0 to 0.90 in 6.25 ms. |
246 // mute_slope = 0.0020 / fs_mult in Q20. | 247 // mute_slope = 0.0020 / fs_mult in Q20. |
247 parameters.mute_slope = std::max(parameters.mute_slope, 2097 / fs_mult); | 248 parameters.mute_slope = std::max(parameters.mute_slope, |
| 249 static_cast<int16_t>(2097 / fs_mult)); |
248 } | 250 } |
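The constants 1049 and 2097 above are just 0.0010 and 0.0020 expressed in Q20, which, applied once per output sample, matches the "decrease from 1.0 to 0.95 (or 0.90) in 6.25 ms" comments. A standalone arithmetic check (plain C++, illustrative only, not part of the file under review):

    #include <cstdio>

    int main() {
      const double q20 = 1 << 20;
      // 0.0010 and 0.0020 rounded to Q20 give the constants used above.
      std::printf("%d %d\n", static_cast<int>(0.0010 * q20 + 0.5),   // 1049
                             static_cast<int>(0.0020 * q20 + 0.5));  // 2097
      // 6.25 ms at 8 kHz (fs_mult = 1) is 50 samples; the total drop is ~0.05,
      // i.e. the mute factor goes from 1.0 to 0.95.
      std::printf("%.4f\n", 50 * 1049 / q20);
      return 0;
    }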
249 | 251 |
250 // Mute segment according to slope value. | 252 // Mute segment according to slope value. |
251 if ((consecutive_expands_ != 0) || !parameters.onset) { | 253 if ((consecutive_expands_ != 0) || !parameters.onset) { |
252 // Mute to the previous level, then continue with the muting. | 254 // Mute to the previous level, then continue with the muting. |
253 WebRtcSpl_AffineTransformVector(temp_data, temp_data, | 255 WebRtcSpl_AffineTransformVector(temp_data, temp_data, |
254 parameters.mute_factor, 8192, | 256 parameters.mute_factor, 8192, |
255 14, static_cast<int>(current_lag)); | 257 14, static_cast<int>(current_lag)); |
256 | 258 |
257 if (!stop_muting_) { | 259 if (!stop_muting_) { |
(...skipping 101 matching lines...)
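For context, the WebRtcSpl_AffineTransformVector call above (just before the elided lines) is used here as a rounded Q14 gain: each sample is multiplied by mute_factor (Q14), 8192 (0.5 in Q14) is added for rounding, and the result is shifted right by 14. A minimal sketch of that arithmetic (plain C++; the helper name is mine and this is not the SPL implementation):

    #include <cstddef>
    #include <cstdint>

    // Illustrative Q14 muting step: out[i] = (in[i] * gain_q14 + 8192) >> 14.
    void MuteQ14(const int16_t* in, int16_t gain_q14, size_t length, int16_t* out) {
      for (size_t i = 0; i < length; ++i) {
        int32_t scaled = static_cast<int32_t>(in[i]) * gain_q14 + 8192;  // + 0.5 in Q14
        out[i] = static_cast<int16_t>(scaled >> 14);
      }
    }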
359 int fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength; | 361 int fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength; |
360 | 362 |
361 const size_t signal_length = 256 * fs_mult; | 363 const size_t signal_length = 256 * fs_mult; |
362 const int16_t* audio_history = | 364 const int16_t* audio_history = |
363 &(*sync_buffer_)[0][sync_buffer_->Size() - signal_length]; | 365 &(*sync_buffer_)[0][sync_buffer_->Size() - signal_length]; |
364 | 366 |
365 // Initialize. | 367 // Initialize. |
366 InitializeForAnExpandPeriod(); | 368 InitializeForAnExpandPeriod(); |
367 | 369 |
368 // Calculate correlation in downsampled domain (4 kHz sample rate). | 370 // Calculate correlation in downsampled domain (4 kHz sample rate). |
369 int correlation_scale; | 371 int16_t correlation_scale; |
370 int correlation_length = 51; // TODO(hlundin): Legacy bit-exactness. | 372 int correlation_length = 51; // TODO(hlundin): Legacy bit-exactness. |
371 // If it is decided to break bit-exactness |correlation_length| should be | 373 // If it is decided to break bit-exactness |correlation_length| should be |
372 // initialized to the return value of Correlation(). | 374 // initialized to the return value of Correlation(). |
373 Correlation(audio_history, signal_length, correlation_vector, | 375 Correlation(audio_history, signal_length, correlation_vector, |
374 &correlation_scale); | 376 &correlation_scale); |
375 | 377 |
376 // Find peaks in correlation vector. | 378 // Find peaks in correlation vector. |
377 DspHelper::PeakDetection(correlation_vector, correlation_length, | 379 DspHelper::PeakDetection(correlation_vector, correlation_length, |
378 kNumCorrelationCandidates, fs_mult, | 380 kNumCorrelationCandidates, fs_mult, |
379 best_correlation_index, best_correlation); | 381 best_correlation_index, best_correlation); |
(...skipping 57 matching lines...)
437 | 439 |
438 for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) { | 440 for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) { |
439 ChannelParameters& parameters = channel_parameters_[channel_ix]; | 441 ChannelParameters& parameters = channel_parameters_[channel_ix]; |
440 // Calculate suitable scaling. | 442 // Calculate suitable scaling. |
441 int16_t signal_max = WebRtcSpl_MaxAbsValueW16( | 443 int16_t signal_max = WebRtcSpl_MaxAbsValueW16( |
442 &audio_history[signal_length - correlation_length - start_index | 444 &audio_history[signal_length - correlation_length - start_index |
443 - correlation_lags], | 445 - correlation_lags], |
444 correlation_length + start_index + correlation_lags - 1); | 446 correlation_length + start_index + correlation_lags - 1); |
445 correlation_scale = ((31 - WebRtcSpl_NormW32(signal_max * signal_max)) | 447 correlation_scale = ((31 - WebRtcSpl_NormW32(signal_max * signal_max)) |
446 + (31 - WebRtcSpl_NormW32(correlation_length))) - 31; | 448 + (31 - WebRtcSpl_NormW32(correlation_length))) - 31; |
447 correlation_scale = std::max(0, correlation_scale); | 449 correlation_scale = std::max(static_cast<int16_t>(0), correlation_scale); |
448 | 450 |
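The scaling above bounds the upcoming 32-bit accumulations: (31 - WebRtcSpl_NormW32(x)) is the number of significant bits in a positive x, so the sum estimates the bit width of correlation_length * signal_max^2, and anything beyond 31 bits is shifted out of each product. A numeric illustration (plain C++, with a local bit count standing in for the SPL norm helper and example values of my own):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    // Plays the role of 31 - WebRtcSpl_NormW32(x) for positive x (illustrative only).
    static int SignificantBits(int32_t x) {
      int bits = 0;
      while (x > 0) { ++bits; x >>= 1; }
      return bits;
    }

    int main() {
      const int32_t signal_max = 12000;       // example peak amplitude
      const int32_t correlation_length = 60;  // example number of terms
      // Worst-case sum is about correlation_length * signal_max^2; shift each
      // product so that this sum still fits in 32 bits.
      const int scale = std::max(SignificantBits(signal_max * signal_max) +
                                 SignificantBits(correlation_length) - 31, 0);
      std::printf("scale = %d\n", scale);  // 28 + 6 - 31 = 3
      return 0;
    }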
449 // Calculate the correlation, store in |correlation_vector2|. | 451 // Calculate the correlation, store in |correlation_vector2|. |
450 WebRtcSpl_CrossCorrelation( | 452 WebRtcSpl_CrossCorrelation( |
451 correlation_vector2, | 453 correlation_vector2, |
452 &(audio_history[signal_length - correlation_length]), | 454 &(audio_history[signal_length - correlation_length]), |
453 &(audio_history[signal_length - correlation_length - start_index]), | 455 &(audio_history[signal_length - correlation_length - start_index]), |
454 correlation_length, correlation_lags, correlation_scale, -1); | 456 correlation_length, correlation_lags, correlation_scale, -1); |
455 | 457 |
456 // Find maximizing index. | 458 // Find maximizing index. |
457 best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags); | 459 best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags); |
458 int32_t max_correlation = correlation_vector2[best_index]; | 460 int32_t max_correlation = correlation_vector2[best_index]; |
459 // Compensate index with start offset. | 461 // Compensate index with start offset. |
460 best_index = best_index + start_index; | 462 best_index = best_index + start_index; |
461 | 463 |
462 // Calculate energies. | 464 // Calculate energies. |
463 int32_t energy1 = WebRtcSpl_DotProductWithScale( | 465 int32_t energy1 = WebRtcSpl_DotProductWithScale( |
464 &(audio_history[signal_length - correlation_length]), | 466 &(audio_history[signal_length - correlation_length]), |
465 &(audio_history[signal_length - correlation_length]), | 467 &(audio_history[signal_length - correlation_length]), |
466 correlation_length, correlation_scale); | 468 correlation_length, correlation_scale); |
467 int32_t energy2 = WebRtcSpl_DotProductWithScale( | 469 int32_t energy2 = WebRtcSpl_DotProductWithScale( |
468 &(audio_history[signal_length - correlation_length - best_index]), | 470 &(audio_history[signal_length - correlation_length - best_index]), |
469 &(audio_history[signal_length - correlation_length - best_index]), | 471 &(audio_history[signal_length - correlation_length - best_index]), |
470 correlation_length, correlation_scale); | 472 correlation_length, correlation_scale); |
471 | 473 |
472 // Calculate the correlation coefficient between the two portions of the | 474 // Calculate the correlation coefficient between the two portions of the |
473 // signal. | 475 // signal. |
474 int32_t corr_coefficient; | 476 int16_t corr_coefficient; |
475 if ((energy1 > 0) && (energy2 > 0)) { | 477 if ((energy1 > 0) && (energy2 > 0)) { |
476 int energy1_scale = std::max(16 - WebRtcSpl_NormW32(energy1), 0); | 478 int energy1_scale = std::max(16 - WebRtcSpl_NormW32(energy1), 0); |
477 int energy2_scale = std::max(16 - WebRtcSpl_NormW32(energy2), 0); | 479 int energy2_scale = std::max(16 - WebRtcSpl_NormW32(energy2), 0); |
478 // Make sure total scaling is even (to simplify scale factor after sqrt). | 480 // Make sure total scaling is even (to simplify scale factor after sqrt). |
479 if ((energy1_scale + energy2_scale) & 1) { | 481 if ((energy1_scale + energy2_scale) & 1) { |
480 // If sum is odd, add 1 to make it even. | 482 // If sum is odd, add 1 to make it even. |
481 energy1_scale += 1; | 483 energy1_scale += 1; |
482 } | 484 } |
483 int32_t scaled_energy1 = energy1 >> energy1_scale; | 485 int16_t scaled_energy1 = energy1 >> energy1_scale; |
484 int32_t scaled_energy2 = energy2 >> energy2_scale; | 486 int16_t scaled_energy2 = energy2 >> energy2_scale; |
485 int16_t sqrt_energy_product = static_cast<int16_t>( | 487 int16_t sqrt_energy_product = WebRtcSpl_SqrtFloor( |
486 WebRtcSpl_SqrtFloor(scaled_energy1 * scaled_energy2)); | 488 scaled_energy1 * scaled_energy2); |
487 // Calculate max_correlation / sqrt(energy1 * energy2) in Q14. | 489 // Calculate max_correlation / sqrt(energy1 * energy2) in Q14. |
488 int cc_shift = 14 - (energy1_scale + energy2_scale) / 2; | 490 int cc_shift = 14 - (energy1_scale + energy2_scale) / 2; |
489 max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift); | 491 max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift); |
490 corr_coefficient = WebRtcSpl_DivW32W16(max_correlation, | 492 corr_coefficient = WebRtcSpl_DivW32W16(max_correlation, |
491 sqrt_energy_product); | 493 sqrt_energy_product); |
492 // Cap at 1.0 in Q14. | 494 corr_coefficient = std::min(static_cast<int16_t>(16384), |
493 corr_coefficient = std::min(16384, corr_coefficient); | 495 corr_coefficient); // Cap at 1.0 in Q14. |
494 } else { | 496 } else { |
495 corr_coefficient = 0; | 497 corr_coefficient = 0; |
496 } | 498 } |
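The branch above forms max_correlation / sqrt(energy1 * energy2) in Q14; the energies are first shifted down by an even total number of bits so that the square root removes exactly half of that scaling, and the quotient is capped at 1.0 (16384). A rough standalone sketch of the same bookkeeping (plain C++, with floating point standing in for the SPL sqrt and division; the function name is mine):

    #include <algorithm>
    #include <cmath>
    #include <cstdint>

    // Illustrative Q14 correlation coefficient: corr = max_corr / sqrt(e1 * e2).
    int16_t CorrCoefficientQ14(int32_t max_corr, int32_t energy1, int32_t energy2) {
      if (energy1 <= 0 || energy2 <= 0) return 0;
      auto bits = [](int32_t x) { int b = 0; while (x > 0) { ++b; x >>= 1; } return b; };
      int scale1 = std::max(bits(energy1) - 15, 0);  // like 16 - WebRtcSpl_NormW32(e1)
      int scale2 = std::max(bits(energy2) - 15, 0);
      if ((scale1 + scale2) & 1) ++scale1;           // even total: sqrt halves it exactly
      double sqrt_product =
          std::sqrt(static_cast<double>(energy1 >> scale1) * (energy2 >> scale2));
      // Compensate for the (scale1 + scale2) / 2 bits removed by the sqrt and
      // express the quotient in Q14.
      double corr =
          std::ldexp(static_cast<double>(max_corr), 14 - (scale1 + scale2) / 2) /
          sqrt_product;
      return static_cast<int16_t>(std::min(corr, 16384.0));  // cap at 1.0 in Q14
    }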
497 | 499 |
498 // Extract the two vectors expand_vector0 and expand_vector1 from | 500 // Extract the two vectors expand_vector0 and expand_vector1 from |
499 // |audio_history|. | 501 // |audio_history|. |
500 int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_); | 502 int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_); |
501 const int16_t* vector1 = &(audio_history[signal_length - expansion_length]); | 503 const int16_t* vector1 = &(audio_history[signal_length - expansion_length]); |
502 const int16_t* vector2 = vector1 - distortion_lag; | 504 const int16_t* vector2 = vector1 - distortion_lag; |
503 // Normalize the second vector to the same energy as the first. | 505 // Normalize the second vector to the same energy as the first. |
504 energy1 = WebRtcSpl_DotProductWithScale(vector1, vector1, expansion_length, | 506 energy1 = WebRtcSpl_DotProductWithScale(vector1, vector1, expansion_length, |
505 correlation_scale); | 507 correlation_scale); |
506 energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length, | 508 energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length, |
507 correlation_scale); | 509 correlation_scale); |
508 // Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0, | 510 // Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0, |
509 // i.e., energy1 / energy2 is within 0.25 - 4. | 511 // i.e., energy1 / energy2 is within 0.25 - 4. |
510 int16_t amplitude_ratio; | 512 int16_t amplitude_ratio; |
511 if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) { | 513 if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) { |
512 // Energy constraint fulfilled. Use both vectors and scale them | 514 // Energy constraint fulfilled. Use both vectors and scale them |
513 // accordingly. | 515 // accordingly. |
514 int32_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0); | 516 int16_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0); |
515 int32_t scaled_energy1 = scaled_energy2 - 13; | 517 int16_t scaled_energy1 = scaled_energy2 - 13; |
516 // Calculate scaled_energy1 / scaled_energy2 in Q13. | 518 // Calculate scaled_energy1 / scaled_energy2 in Q13. |
517 int32_t energy_ratio = WebRtcSpl_DivW32W16( | 519 int32_t energy_ratio = WebRtcSpl_DivW32W16( |
518 WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1), | 520 WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1), |
519 energy2 >> scaled_energy2); | 521 energy2 >> scaled_energy2); |
520 // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26). | 522 // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26). |
521 amplitude_ratio = WebRtcSpl_SqrtFloor(energy_ratio << 13); | 523 amplitude_ratio = WebRtcSpl_SqrtFloor(energy_ratio << 13); |
522 // Copy the two vectors and give them the same energy. | 524 // Copy the two vectors and give them the same energy. |
523 parameters.expand_vector0.Clear(); | 525 parameters.expand_vector0.Clear(); |
524 parameters.expand_vector0.PushBack(vector1, expansion_length); | 526 parameters.expand_vector0.PushBack(vector1, expansion_length); |
525 parameters.expand_vector1.Clear(); | 527 parameters.expand_vector1.Clear(); |
(...skipping 147 matching lines...)
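Worth noting how the Q formats line up in the energy-ratio and square-root steps just before the elided block: the energy ratio is formed in Q13, shifted up to Q26, and the integer square root of a Q26 value is the amplitude ratio in Q13. A quick numeric check (plain C++, illustrative values only):

    #include <cmath>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // If energy1 is four times energy2, the Q13 energy ratio is 4.0 * 8192.
      const int32_t energy_ratio_q13 = 4 * 8192;
      const int32_t ratio_q26 = energy_ratio_q13 << 13;  // same value in Q26
      // Integer sqrt of a Q26 number is a Q13 number: sqrt(4.0) = 2.0 -> 16384.
      const int32_t amplitude_ratio_q13 =
          static_cast<int32_t>(std::floor(std::sqrt(static_cast<double>(ratio_q26))));
      std::printf("%d\n", amplitude_ratio_q13);  // 16384, i.e. 2.0 in Q13
      return 0;
    }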
673 parameters.ar_gain = unvoiced_gain; | 675 parameters.ar_gain = unvoiced_gain; |
674 | 676 |
675 // Calculate voice_mix_factor from corr_coefficient. | 677 // Calculate voice_mix_factor from corr_coefficient. |
676 // Let x = corr_coefficient. Then, we compute: | 678 // Let x = corr_coefficient. Then, we compute: |
677 // if (x > 0.48) | 679 // if (x > 0.48) |
678 // voice_mix_factor = (-5179 + 19931x - 16422x^2 + 5776x^3) / 4096; | 680 // voice_mix_factor = (-5179 + 19931x - 16422x^2 + 5776x^3) / 4096; |
679 // else | 681 // else |
680 // voice_mix_factor = 0; | 682 // voice_mix_factor = 0; |
681 if (corr_coefficient > 7875) { | 683 if (corr_coefficient > 7875) { |
682 int16_t x1, x2, x3; | 684 int16_t x1, x2, x3; |
683 // |corr_coefficient| is in Q14. | 685 x1 = corr_coefficient; // |corr_coefficient| is in Q14. |
684 x1 = static_cast<int16_t>(corr_coefficient); | |
685 x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14. | 686 x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14. |
686 x3 = (x1 * x2) >> 14; | 687 x3 = (x1 * x2) >> 14; |
687 static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 }; | 688 static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 }; |
688 int32_t temp_sum = kCoefficients[0] << 14; | 689 int32_t temp_sum = kCoefficients[0] << 14; |
689 temp_sum += kCoefficients[1] * x1; | 690 temp_sum += kCoefficients[1] * x1; |
690 temp_sum += kCoefficients[2] * x2; | 691 temp_sum += kCoefficients[2] * x2; |
691 temp_sum += kCoefficients[3] * x3; | 692 temp_sum += kCoefficients[3] * x3; |
692 parameters.voice_mix_factor = | 693 parameters.voice_mix_factor = |
693 static_cast<int16_t>(std::min(temp_sum / 4096, 16384)); | 694 static_cast<int16_t>(std::min(temp_sum / 4096, 16384)); |
694 parameters.voice_mix_factor = std::max(parameters.voice_mix_factor, | 695 parameters.voice_mix_factor = std::max(parameters.voice_mix_factor, |
695 static_cast<int16_t>(0)); | 696 static_cast<int16_t>(0)); |
696 } else { | 697 } else { |
697 parameters.voice_mix_factor = 0; | 698 parameters.voice_mix_factor = 0; |
698 } | 699 } |
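The threshold 7875 is roughly 0.48 in Q14 (0.48 * 16384 is about 7864), and the polynomial is evaluated with Q12 coefficients against Q14 powers of x, giving a Q26 sum that the division by 4096 brings back to Q14. A standalone check of that arithmetic for one input (plain C++, illustrative only, not part of the file under review):

    #include <algorithm>
    #include <cstdint>
    #include <cstdio>

    int main() {
      // Evaluate the cubic above for x = 0.75 (12288 in Q14), fixed point vs. double.
      const int16_t x1 = 12288;
      const int16_t x2 = (x1 * x1) >> 14;  // x^2 in Q14
      const int16_t x3 = (x1 * x2) >> 14;  // x^3 in Q14
      static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 };  // Q12
      int32_t temp_sum = kCoefficients[0] << 14;  // accumulate in Q26
      temp_sum += kCoefficients[1] * x1;
      temp_sum += kCoefficients[2] * x2;
      temp_sum += kCoefficients[3] * x3;
      const int32_t mix_q14 =
          std::max<int32_t>(0, std::min<int32_t>(temp_sum / 4096, 16384));
      const double x = 0.75;
      const double mix = (-5179 + 19931 * x - 16422 * x * x + 5776 * x * x * x) / 4096.0;
      std::printf("%d vs %d\n", mix_q14, static_cast<int>(mix * 16384));  // 11874 vs 11874
      return 0;
    }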
699 | 700 |
700 // Calculate muting slope. Reuse value from earlier scaling of | 701 // Calculate muting slope. Reuse value from earlier scaling of |
701 // |expand_vector0| and |expand_vector1|. | 702 // |expand_vector0| and |expand_vector1|. |
702 int16_t slope = amplitude_ratio; | 703 int16_t slope = amplitude_ratio; |
703 if (slope > 12288) { | 704 if (slope > 12288) { |
704 // slope > 1.5. | 705 // slope > 1.5. |
705 // Calculate (1 - (1 / slope)) / distortion_lag = | 706 // Calculate (1 - (1 / slope)) / distortion_lag = |
706 // (slope - 1) / (distortion_lag * slope). | 707 // (slope - 1) / (distortion_lag * slope). |
707 // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before | 708 // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before |
708 // the division. | 709 // the division. |
709 // Shift the denominator from Q13 to Q5 before the division. The result of | 710 // Shift the denominator from Q13 to Q5 before the division. The result of |
710 // the division will then be in Q20. | 711 // the division will then be in Q20. |
711 int temp_ratio = WebRtcSpl_DivW32W16((slope - 8192) << 12, | 712 int16_t temp_ratio = WebRtcSpl_DivW32W16((slope - 8192) << 12, |
712 (distortion_lag * slope) >> 8); | 713 (distortion_lag * slope) >> 8); |
713 if (slope > 14746) { | 714 if (slope > 14746) { |
714 // slope > 1.8. | 715 // slope > 1.8. |
715 // Divide by 2, with proper rounding. | 716 // Divide by 2, with proper rounding. |
716 parameters.mute_slope = (temp_ratio + 1) / 2; | 717 parameters.mute_slope = (temp_ratio + 1) / 2; |
717 } else { | 718 } else { |
718 // Divide by 8, with proper rounding. | 719 // Divide by 8, with proper rounding. |
719 parameters.mute_slope = (temp_ratio + 4) / 8; | 720 parameters.mute_slope = (temp_ratio + 4) / 8; |
720 } | 721 } |
721 parameters.onset = true; | 722 parameters.onset = true; |
722 } else { | 723 } else { |
723 // Calculate (1 - slope) / distortion_lag. | 724 // Calculate (1 - slope) / distortion_lag. |
724 // Shift |slope| by 7 to Q20 before the division. The result is in Q20. | 725 // Shift |slope| by 7 to Q20 before the division. The result is in Q20. |
725 parameters.mute_slope = WebRtcSpl_DivW32W16((8192 - slope) << 7, | 726 parameters.mute_slope = WebRtcSpl_DivW32W16((8192 - slope) << 7, |
726 distortion_lag); | 727 distortion_lag); |
727 if (parameters.voice_mix_factor <= 13107) { | 728 if (parameters.voice_mix_factor <= 13107) { |
728 // Make sure the mute factor decreases from 1.0 to 0.9 in no more than | 729 // Make sure the mute factor decreases from 1.0 to 0.9 in no more than |
729 // 6.25 ms. | 730 // 6.25 ms. |
730 // mute_slope >= 0.005 / fs_mult in Q20. | 731 // mute_slope >= 0.005 / fs_mult in Q20. |
731 parameters.mute_slope = std::max(5243 / fs_mult, parameters.mute_slope); | 732 parameters.mute_slope = std::max(static_cast<int16_t>(5243 / fs_mult), |
| 733 parameters.mute_slope); |
732 } else if (slope > 8028) { | 734 } else if (slope > 8028) { |
733 parameters.mute_slope = 0; | 735 parameters.mute_slope = 0; |
734 } | 736 } |
735 parameters.onset = false; | 737 parameters.onset = false; |
736 } | 738 } |
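On the Q formats in the slope computation above: slope is in Q13, so in the onset branch (slope - 8192) << 12 is Q25 and the Q5 denominator makes the quotient Q20, while in the other branch (8192 - slope) << 7 is Q20 directly and dividing by the lag keeps it in Q20; the 5243 floor applied there is 0.005 in Q20 (0.005 * 2^20 is about 5243). A small numeric check of the non-onset case (plain C++, illustrative values only):

    #include <cstdint>
    #include <cstdio>

    int main() {
      const int16_t slope_q13 = 4096;  // 0.5 in Q13
      const int distortion_lag = 100;  // samples (example value)
      // (1 - slope) / distortion_lag in Q20, as in the non-onset branch above.
      const int32_t mute_slope_q20 = ((8192 - slope_q13) << 7) / distortion_lag;
      std::printf("%d (~%.4f per sample)\n", mute_slope_q20,
                  mute_slope_q20 / static_cast<double>(1 << 20));  // 5242, ~0.0050
      return 0;
    }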
737 } | 739 } |
738 } | 740 } |
739 | 741 |
740 Expand::ChannelParameters::ChannelParameters() | 742 Expand::ChannelParameters::ChannelParameters() |
741 : mute_factor(16384), | 743 : mute_factor(16384), |
742 ar_gain(0), | 744 ar_gain(0), |
743 ar_gain_scale(0), | 745 ar_gain_scale(0), |
744 voice_mix_factor(0), | 746 voice_mix_factor(0), |
745 current_voice_mix_factor(0), | 747 current_voice_mix_factor(0), |
746 onset(false), | 748 onset(false), |
747 mute_slope(0) { | 749 mute_slope(0) { |
748 memset(ar_filter, 0, sizeof(ar_filter)); | 750 memset(ar_filter, 0, sizeof(ar_filter)); |
749 memset(ar_filter_state, 0, sizeof(ar_filter_state)); | 751 memset(ar_filter_state, 0, sizeof(ar_filter_state)); |
750 } | 752 } |
751 | 753 |
752 int16_t Expand::Correlation(const int16_t* input, size_t input_length, | 754 int16_t Expand::Correlation(const int16_t* input, size_t input_length, |
753 int16_t* output, int* output_scale) const { | 755 int16_t* output, int16_t* output_scale) const { |
754 // Set parameters depending on sample rate. | 756 // Set parameters depending on sample rate. |
755 const int16_t* filter_coefficients; | 757 const int16_t* filter_coefficients; |
756 int16_t num_coefficients; | 758 int16_t num_coefficients; |
757 int16_t downsampling_factor; | 759 int16_t downsampling_factor; |
758 if (fs_hz_ == 8000) { | 760 if (fs_hz_ == 8000) { |
759 num_coefficients = 3; | 761 num_coefficients = 3; |
760 downsampling_factor = 2; | 762 downsampling_factor = 2; |
761 filter_coefficients = DspHelper::kDownsample8kHzTbl; | 763 filter_coefficients = DspHelper::kDownsample8kHzTbl; |
762 } else if (fs_hz_ == 16000) { | 764 } else if (fs_hz_ == 16000) { |
763 num_coefficients = 5; | 765 num_coefficients = 5; |
(...skipping 68 matching lines...)
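The per-rate branches in Correlation() all target the same 4 kHz analysis rate, so the downsampling factor works out to fs_hz / 4000 (2 at 8 kHz as shown, and presumably 4, 8, and 12 for the higher rates handled in the elided lines). A trivial illustration of that relationship (plain C++):

    #include <cstdio>

    int main() {
      // Downsampling each supported rate to the common 4 kHz analysis rate.
      const int rates[] = {8000, 16000, 32000, 48000};
      for (int fs_hz : rates) {
        std::printf("fs = %d Hz -> factor %d\n", fs_hz, fs_hz / 4000);
      }
      return 0;
    }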
832 RandomVector* random_vector, | 834 RandomVector* random_vector, |
833 int fs, | 835 int fs, |
834 size_t num_channels) const { | 836 size_t num_channels) const { |
835 return new Expand(background_noise, sync_buffer, random_vector, fs, | 837 return new Expand(background_noise, sync_buffer, random_vector, fs, |
836 num_channels); | 838 num_channels); |
837 } | 839 } |
838 | 840 |
839 // TODO(turajs): This can be moved to BackgroundNoise class. | 841 // TODO(turajs): This can be moved to BackgroundNoise class. |
840 void Expand::GenerateBackgroundNoise(int16_t* random_vector, | 842 void Expand::GenerateBackgroundNoise(int16_t* random_vector, |
841 size_t channel, | 843 size_t channel, |
842 int mute_slope, | 844 int16_t mute_slope, |
843 bool too_many_expands, | 845 bool too_many_expands, |
844 size_t num_noise_samples, | 846 size_t num_noise_samples, |
845 int16_t* buffer) { | 847 int16_t* buffer) { |
846 static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder; | 848 static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder; |
847 int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125]; | 849 int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125]; |
848 assert(static_cast<size_t>(kMaxSampleRate / 8000 * 125) >= num_noise_samples); | 850 assert(static_cast<size_t>(kMaxSampleRate / 8000 * 125) >= num_noise_samples); |
849 int16_t* noise_samples = &buffer[kNoiseLpcOrder]; | 851 int16_t* noise_samples = &buffer[kNoiseLpcOrder]; |
850 if (background_noise_->initialized()) { | 852 if (background_noise_->initialized()) { |
851 // Use background noise parameters. | 853 // Use background noise parameters. |
852 memcpy(noise_samples - kNoiseLpcOrder, | 854 memcpy(noise_samples - kNoiseLpcOrder, |
(...skipping 22 matching lines...)
875 &(noise_samples[num_noise_samples - kNoiseLpcOrder]), | 877 &(noise_samples[num_noise_samples - kNoiseLpcOrder]), |
876 kNoiseLpcOrder); | 878 kNoiseLpcOrder); |
877 | 879 |
878 // Unmute the background noise. | 880 // Unmute the background noise. |
879 int16_t bgn_mute_factor = background_noise_->MuteFactor(channel); | 881 int16_t bgn_mute_factor = background_noise_->MuteFactor(channel); |
880 NetEq::BackgroundNoiseMode bgn_mode = background_noise_->mode(); | 882 NetEq::BackgroundNoiseMode bgn_mode = background_noise_->mode(); |
881 if (bgn_mode == NetEq::kBgnFade && too_many_expands && | 883 if (bgn_mode == NetEq::kBgnFade && too_many_expands && |
882 bgn_mute_factor > 0) { | 884 bgn_mute_factor > 0) { |
883 // Fade BGN to zero. | 885 // Fade BGN to zero. |
884 // Calculate muting slope, approximately -2^18 / fs_hz. | 886 // Calculate muting slope, approximately -2^18 / fs_hz. |
885 int mute_slope; | 887 int16_t mute_slope; |
886 if (fs_hz_ == 8000) { | 888 if (fs_hz_ == 8000) { |
887 mute_slope = -32; | 889 mute_slope = -32; |
888 } else if (fs_hz_ == 16000) { | 890 } else if (fs_hz_ == 16000) { |
889 mute_slope = -16; | 891 mute_slope = -16; |
890 } else if (fs_hz_ == 32000) { | 892 } else if (fs_hz_ == 32000) { |
891 mute_slope = -8; | 893 mute_slope = -8; |
892 } else { | 894 } else { |
893 mute_slope = -5; | 895 mute_slope = -5; |
894 } | 896 } |
895 // Use UnmuteSignal function with negative slope. | 897 // Use UnmuteSignal function with negative slope. |
(...skipping 39 matching lines...)
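The per-rate fade slopes just before the elided lines (-32, -16, -8, -5) are -2^18 / fs_hz truncated toward zero, as the comment says, with the final else presumably covering 48 kHz. A one-line check (plain C++):

    #include <cstdio>

    int main() {
      const int rates[] = {8000, 16000, 32000, 48000};
      for (int fs_hz : rates) {
        // Approximately -2^18 / fs_hz: -32, -16, -8, -5.
        std::printf("fs = %d Hz -> slope %d\n", fs_hz, -(1 << 18) / fs_hz);
      }
      return 0;
    }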
935 const size_t kMaxRandSamples = RandomVector::kRandomTableSize; | 937 const size_t kMaxRandSamples = RandomVector::kRandomTableSize; |
936 while (samples_generated < length) { | 938 while (samples_generated < length) { |
937 size_t rand_length = std::min(length - samples_generated, kMaxRandSamples); | 939 size_t rand_length = std::min(length - samples_generated, kMaxRandSamples); |
938 random_vector_->IncreaseSeedIncrement(seed_increment); | 940 random_vector_->IncreaseSeedIncrement(seed_increment); |
939 random_vector_->Generate(rand_length, &random_vector[samples_generated]); | 941 random_vector_->Generate(rand_length, &random_vector[samples_generated]); |
940 samples_generated += rand_length; | 942 samples_generated += rand_length; |
941 } | 943 } |
942 } | 944 } |
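The loop above fills an arbitrary-length buffer in chunks of at most kRandomTableSize samples, bumping the seed increment before each chunk so consecutive chunks are not identical copies of the table. A generic sketch of the same chunking pattern (standalone plain C++, not the NetEq classes):

    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <functional>

    // Fill `length` samples by repeatedly asking a bounded generator for at
    // most `max_chunk` samples at a time (illustrative pattern only).
    void ChunkedFill(size_t length, size_t max_chunk, int16_t* out,
                     const std::function<void(size_t, int16_t*)>& generate_chunk) {
      size_t produced = 0;
      while (produced < length) {
        const size_t chunk = std::min(length - produced, max_chunk);
        generate_chunk(chunk, &out[produced]);
        produced += chunk;
      }
    }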
943 | 945 |
944 } // namespace webrtc | 946 } // namespace webrtc |