OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 221 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
232 voiced_vector + temp_length, parameters.current_voice_mix_factor, | 232 voiced_vector + temp_length, parameters.current_voice_mix_factor, |
233 unvoiced_vector + temp_length, temp_scale, 14, | 233 unvoiced_vector + temp_length, temp_scale, 14, |
234 temp_data + temp_length, static_cast<int>(current_lag - temp_length)); | 234 temp_data + temp_length, static_cast<int>(current_lag - temp_length)); |
235 } | 235 } |
236 | 236 |
237 // Select muting slope depending on how many consecutive expands we have | 237 // Select muting slope depending on how many consecutive expands we have |
238 // done. | 238 // done. |
239 if (consecutive_expands_ == 3) { | 239 if (consecutive_expands_ == 3) { |
240 // Let the mute factor decrease from 1.0 to 0.95 in 6.25 ms. | 240 // Let the mute factor decrease from 1.0 to 0.95 in 6.25 ms. |
241 // mute_slope = 0.0010 / fs_mult in Q20. | 241 // mute_slope = 0.0010 / fs_mult in Q20. |
242 parameters.mute_slope = std::max(parameters.mute_slope, | 242 parameters.mute_slope = std::max(parameters.mute_slope, 1049 / fs_mult); |
243 static_cast<int16_t>(1049 / fs_mult)); | |
244 } | 243 } |
245 if (consecutive_expands_ == 7) { | 244 if (consecutive_expands_ == 7) { |
246 // Let the mute factor decrease from 1.0 to 0.90 in 6.25 ms. | 245 // Let the mute factor decrease from 1.0 to 0.90 in 6.25 ms. |
247 // mute_slope = 0.0020 / fs_mult in Q20. | 246 // mute_slope = 0.0020 / fs_mult in Q20. |
248 parameters.mute_slope = std::max(parameters.mute_slope, | 247 parameters.mute_slope = std::max(parameters.mute_slope, 2097 / fs_mult); |
249 static_cast<int16_t>(2097 / fs_mult)); | |
250 } | 248 } |
251 | 249 |
252 // Mute segment according to slope value. | 250 // Mute segment according to slope value. |
253 if ((consecutive_expands_ != 0) || !parameters.onset) { | 251 if ((consecutive_expands_ != 0) || !parameters.onset) { |
254 // Mute to the previous level, then continue with the muting. | 252 // Mute to the previous level, then continue with the muting. |
255 WebRtcSpl_AffineTransformVector(temp_data, temp_data, | 253 WebRtcSpl_AffineTransformVector(temp_data, temp_data, |
256 parameters.mute_factor, 8192, | 254 parameters.mute_factor, 8192, |
257 14, static_cast<int>(current_lag)); | 255 14, static_cast<int>(current_lag)); |
258 | 256 |
259 if (!stop_muting_) { | 257 if (!stop_muting_) { |
(...skipping 101 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
361 int fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength; | 359 int fs_mult_lpc_analysis_len = fs_mult * kLpcAnalysisLength; |
362 | 360 |
363 const size_t signal_length = 256 * fs_mult; | 361 const size_t signal_length = 256 * fs_mult; |
364 const int16_t* audio_history = | 362 const int16_t* audio_history = |
365 &(*sync_buffer_)[0][sync_buffer_->Size() - signal_length]; | 363 &(*sync_buffer_)[0][sync_buffer_->Size() - signal_length]; |
366 | 364 |
367 // Initialize. | 365 // Initialize. |
368 InitializeForAnExpandPeriod(); | 366 InitializeForAnExpandPeriod(); |
369 | 367 |
370 // Calculate correlation in downsampled domain (4 kHz sample rate). | 368 // Calculate correlation in downsampled domain (4 kHz sample rate). |
371 int16_t correlation_scale; | 369 int correlation_scale; |
372 int correlation_length = 51; // TODO(hlundin): Legacy bit-exactness. | 370 int correlation_length = 51; // TODO(hlundin): Legacy bit-exactness. |
373 // If it is decided to break bit-exactness |correlation_length| should be | 371 // If it is decided to break bit-exactness |correlation_length| should be |
374 // initialized to the return value of Correlation(). | 372 // initialized to the return value of Correlation(). |
375 Correlation(audio_history, signal_length, correlation_vector, | 373 Correlation(audio_history, signal_length, correlation_vector, |
376 &correlation_scale); | 374 &correlation_scale); |
377 | 375 |
378 // Find peaks in correlation vector. | 376 // Find peaks in correlation vector. |
379 DspHelper::PeakDetection(correlation_vector, correlation_length, | 377 DspHelper::PeakDetection(correlation_vector, correlation_length, |
380 kNumCorrelationCandidates, fs_mult, | 378 kNumCorrelationCandidates, fs_mult, |
381 best_correlation_index, best_correlation); | 379 best_correlation_index, best_correlation); |
(...skipping 56 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
438 | 436 |
439 for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) { | 437 for (size_t channel_ix = 0; channel_ix < num_channels_; ++channel_ix) { |
440 ChannelParameters& parameters = channel_parameters_[channel_ix]; | 438 ChannelParameters& parameters = channel_parameters_[channel_ix]; |
441 // Calculate suitable scaling. | 439 // Calculate suitable scaling. |
442 int16_t signal_max = WebRtcSpl_MaxAbsValueW16( | 440 int16_t signal_max = WebRtcSpl_MaxAbsValueW16( |
443 &audio_history[signal_length - correlation_length - start_index | 441 &audio_history[signal_length - correlation_length - start_index |
444 - correlation_lags], | 442 - correlation_lags], |
445 correlation_length + start_index + correlation_lags - 1); | 443 correlation_length + start_index + correlation_lags - 1); |
446 correlation_scale = ((31 - WebRtcSpl_NormW32(signal_max * signal_max)) | 444 correlation_scale = ((31 - WebRtcSpl_NormW32(signal_max * signal_max)) |
447 + (31 - WebRtcSpl_NormW32(correlation_length))) - 31; | 445 + (31 - WebRtcSpl_NormW32(correlation_length))) - 31; |
448 correlation_scale = std::max(static_cast<int16_t>(0), correlation_scale); | 446 correlation_scale = std::max(0, correlation_scale); |
449 | 447 |
450 // Calculate the correlation, store in |correlation_vector2|. | 448 // Calculate the correlation, store in |correlation_vector2|. |
451 WebRtcSpl_CrossCorrelation( | 449 WebRtcSpl_CrossCorrelation( |
452 correlation_vector2, | 450 correlation_vector2, |
453 &(audio_history[signal_length - correlation_length]), | 451 &(audio_history[signal_length - correlation_length]), |
454 &(audio_history[signal_length - correlation_length - start_index]), | 452 &(audio_history[signal_length - correlation_length - start_index]), |
455 correlation_length, correlation_lags, correlation_scale, -1); | 453 correlation_length, correlation_lags, correlation_scale, -1); |
456 | 454 |
457 // Find maximizing index. | 455 // Find maximizing index. |
458 best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags); | 456 best_index = WebRtcSpl_MaxIndexW32(correlation_vector2, correlation_lags); |
459 int32_t max_correlation = correlation_vector2[best_index]; | 457 int32_t max_correlation = correlation_vector2[best_index]; |
460 // Compensate index with start offset. | 458 // Compensate index with start offset. |
461 best_index = best_index + start_index; | 459 best_index = best_index + start_index; |
462 | 460 |
463 // Calculate energies. | 461 // Calculate energies. |
464 int32_t energy1 = WebRtcSpl_DotProductWithScale( | 462 int32_t energy1 = WebRtcSpl_DotProductWithScale( |
465 &(audio_history[signal_length - correlation_length]), | 463 &(audio_history[signal_length - correlation_length]), |
466 &(audio_history[signal_length - correlation_length]), | 464 &(audio_history[signal_length - correlation_length]), |
467 correlation_length, correlation_scale); | 465 correlation_length, correlation_scale); |
468 int32_t energy2 = WebRtcSpl_DotProductWithScale( | 466 int32_t energy2 = WebRtcSpl_DotProductWithScale( |
469 &(audio_history[signal_length - correlation_length - best_index]), | 467 &(audio_history[signal_length - correlation_length - best_index]), |
470 &(audio_history[signal_length - correlation_length - best_index]), | 468 &(audio_history[signal_length - correlation_length - best_index]), |
471 correlation_length, correlation_scale); | 469 correlation_length, correlation_scale); |
472 | 470 |
473 // Calculate the correlation coefficient between the two portions of the | 471 // Calculate the correlation coefficient between the two portions of the |
474 // signal. | 472 // signal. |
475 int16_t corr_coefficient; | 473 int32_t corr_coefficient; |
476 if ((energy1 > 0) && (energy2 > 0)) { | 474 if ((energy1 > 0) && (energy2 > 0)) { |
477 int energy1_scale = std::max(16 - WebRtcSpl_NormW32(energy1), 0); | 475 int energy1_scale = std::max(16 - WebRtcSpl_NormW32(energy1), 0); |
478 int energy2_scale = std::max(16 - WebRtcSpl_NormW32(energy2), 0); | 476 int energy2_scale = std::max(16 - WebRtcSpl_NormW32(energy2), 0); |
479 // Make sure total scaling is even (to simplify scale factor after sqrt). | 477 // Make sure total scaling is even (to simplify scale factor after sqrt). |
480 if ((energy1_scale + energy2_scale) & 1) { | 478 if ((energy1_scale + energy2_scale) & 1) { |
481 // If sum is odd, add 1 to make it even. | 479 // If sum is odd, add 1 to make it even. |
482 energy1_scale += 1; | 480 energy1_scale += 1; |
483 } | 481 } |
484 int16_t scaled_energy1 = energy1 >> energy1_scale; | 482 int32_t scaled_energy1 = energy1 >> energy1_scale; |
485 int16_t scaled_energy2 = energy2 >> energy2_scale; | 483 int32_t scaled_energy2 = energy2 >> energy2_scale; |
486 int16_t sqrt_energy_product = WebRtcSpl_SqrtFloor( | 484 int16_t sqrt_energy_product = static_cast<int16_t>( |
487 scaled_energy1 * scaled_energy2); | 485 WebRtcSpl_SqrtFloor(scaled_energy1 * scaled_energy2)); |
488 // Calculate max_correlation / sqrt(energy1 * energy2) in Q14. | 486 // Calculate max_correlation / sqrt(energy1 * energy2) in Q14. |
489 int cc_shift = 14 - (energy1_scale + energy2_scale) / 2; | 487 int cc_shift = 14 - (energy1_scale + energy2_scale) / 2; |
490 max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift); | 488 max_correlation = WEBRTC_SPL_SHIFT_W32(max_correlation, cc_shift); |
491 corr_coefficient = WebRtcSpl_DivW32W16(max_correlation, | 489 corr_coefficient = WebRtcSpl_DivW32W16(max_correlation, |
492 sqrt_energy_product); | 490 sqrt_energy_product); |
493 corr_coefficient = std::min(static_cast<int16_t>(16384), | 491 // Cap at 1.0 in Q14. |
494 corr_coefficient); // Cap at 1.0 in Q14. | 492 corr_coefficient = std::min(16384, corr_coefficient); |
495 } else { | 493 } else { |
496 corr_coefficient = 0; | 494 corr_coefficient = 0; |
497 } | 495 } |
498 | 496 |
499 // Extract the two vectors expand_vector0 and expand_vector1 from | 497 // Extract the two vectors expand_vector0 and expand_vector1 from |
500 // |audio_history|. | 498 // |audio_history|. |
501 int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_); | 499 int16_t expansion_length = static_cast<int16_t>(max_lag_ + overlap_length_); |
502 const int16_t* vector1 = &(audio_history[signal_length - expansion_length]); | 500 const int16_t* vector1 = &(audio_history[signal_length - expansion_length]); |
503 const int16_t* vector2 = vector1 - distortion_lag; | 501 const int16_t* vector2 = vector1 - distortion_lag; |
504 // Normalize the second vector to the same energy as the first. | 502 // Normalize the second vector to the same energy as the first. |
505 energy1 = WebRtcSpl_DotProductWithScale(vector1, vector1, expansion_length, | 503 energy1 = WebRtcSpl_DotProductWithScale(vector1, vector1, expansion_length, |
506 correlation_scale); | 504 correlation_scale); |
507 energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length, | 505 energy2 = WebRtcSpl_DotProductWithScale(vector2, vector2, expansion_length, |
508 correlation_scale); | 506 correlation_scale); |
509 // Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0, | 507 // Confirm that amplitude ratio sqrt(energy1 / energy2) is within 0.5 - 2.0, |
510 // i.e., energy1 / energy2 is within 0.25 - 4. | 508 // i.e., energy1 / energy2 is within 0.25 - 4. |
511 int16_t amplitude_ratio; | 509 int16_t amplitude_ratio; |
512 if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) { | 510 if ((energy1 / 4 < energy2) && (energy1 > energy2 / 4)) { |
513 // Energy constraint fulfilled. Use both vectors and scale them | 511 // Energy constraint fulfilled. Use both vectors and scale them |
514 // accordingly. | 512 // accordingly. |
515 int16_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0); | 513 int32_t scaled_energy2 = std::max(16 - WebRtcSpl_NormW32(energy2), 0); |
516 int16_t scaled_energy1 = scaled_energy2 - 13; | 514 int32_t scaled_energy1 = scaled_energy2 - 13; |
517 // Calculate scaled_energy1 / scaled_energy2 in Q13. | 515 // Calculate scaled_energy1 / scaled_energy2 in Q13. |
518 int32_t energy_ratio = WebRtcSpl_DivW32W16( | 516 int32_t energy_ratio = WebRtcSpl_DivW32W16( |
519 WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1), | 517 WEBRTC_SPL_SHIFT_W32(energy1, -scaled_energy1), |
520 energy2 >> scaled_energy2); | 518 energy2 >> scaled_energy2); |
521 // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26). | 519 // Calculate sqrt ratio in Q13 (sqrt of en1/en2 in Q26). |
522 amplitude_ratio = WebRtcSpl_SqrtFloor(energy_ratio << 13); | 520 amplitude_ratio = WebRtcSpl_SqrtFloor(energy_ratio << 13); |
523 // Copy the two vectors and give them the same energy. | 521 // Copy the two vectors and give them the same energy. |
524 parameters.expand_vector0.Clear(); | 522 parameters.expand_vector0.Clear(); |
525 parameters.expand_vector0.PushBack(vector1, expansion_length); | 523 parameters.expand_vector0.PushBack(vector1, expansion_length); |
526 parameters.expand_vector1.Clear(); | 524 parameters.expand_vector1.Clear(); |
(...skipping 148 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
675 parameters.ar_gain = unvoiced_gain; | 673 parameters.ar_gain = unvoiced_gain; |
676 | 674 |
677 // Calculate voice_mix_factor from corr_coefficient. | 675 // Calculate voice_mix_factor from corr_coefficient. |
678 // Let x = corr_coefficient. Then, we compute: | 676 // Let x = corr_coefficient. Then, we compute: |
679 // if (x > 0.48) | 677 // if (x > 0.48) |
680 // voice_mix_factor = (-5179 + 19931x - 16422x^2 + 5776x^3) / 4096; | 678 // voice_mix_factor = (-5179 + 19931x - 16422x^2 + 5776x^3) / 4096; |
681 // else | 679 // else |
682 // voice_mix_factor = 0; | 680 // voice_mix_factor = 0; |
683 if (corr_coefficient > 7875) { | 681 if (corr_coefficient > 7875) { |
684 int16_t x1, x2, x3; | 682 int16_t x1, x2, x3; |
685 x1 = corr_coefficient; // |corr_coefficient| is in Q14. | 683 // |corr_coefficient| is in Q14. |
| 684 x1 = static_cast<int16_t>(corr_coefficient); |
686 x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14. | 685 x2 = (x1 * x1) >> 14; // Shift 14 to keep result in Q14. |
687 x3 = (x1 * x2) >> 14; | 686 x3 = (x1 * x2) >> 14; |
688 static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 }; | 687 static const int kCoefficients[4] = { -5179, 19931, -16422, 5776 }; |
689 int32_t temp_sum = kCoefficients[0] << 14; | 688 int32_t temp_sum = kCoefficients[0] << 14; |
690 temp_sum += kCoefficients[1] * x1; | 689 temp_sum += kCoefficients[1] * x1; |
691 temp_sum += kCoefficients[2] * x2; | 690 temp_sum += kCoefficients[2] * x2; |
692 temp_sum += kCoefficients[3] * x3; | 691 temp_sum += kCoefficients[3] * x3; |
693 parameters.voice_mix_factor = | 692 parameters.voice_mix_factor = |
694 static_cast<int16_t>(std::min(temp_sum / 4096, 16384)); | 693 static_cast<int16_t>(std::min(temp_sum / 4096, 16384)); |
695 parameters.voice_mix_factor = std::max(parameters.voice_mix_factor, | 694 parameters.voice_mix_factor = std::max(parameters.voice_mix_factor, |
696 static_cast<int16_t>(0)); | 695 static_cast<int16_t>(0)); |
697 } else { | 696 } else { |
698 parameters.voice_mix_factor = 0; | 697 parameters.voice_mix_factor = 0; |
699 } | 698 } |
700 | 699 |
701 // Calculate muting slope. Reuse value from earlier scaling of | 700 // Calculate muting slope. Reuse value from earlier scaling of |
702 // |expand_vector0| and |expand_vector1|. | 701 // |expand_vector0| and |expand_vector1|. |
703 int16_t slope = amplitude_ratio; | 702 int16_t slope = amplitude_ratio; |
704 if (slope > 12288) { | 703 if (slope > 12288) { |
705 // slope > 1.5. | 704 // slope > 1.5. |
706 // Calculate (1 - (1 / slope)) / distortion_lag = | 705 // Calculate (1 - (1 / slope)) / distortion_lag = |
707 // (slope - 1) / (distortion_lag * slope). | 706 // (slope - 1) / (distortion_lag * slope). |
708 // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before | 707 // |slope| is in Q13, so 1 corresponds to 8192. Shift up to Q25 before |
709 // the division. | 708 // the division. |
710 // Shift the denominator from Q13 to Q5 before the division. The result of | 709 // Shift the denominator from Q13 to Q5 before the division. The result of |
711 // the division will then be in Q20. | 710 // the division will then be in Q20. |
712 int16_t temp_ratio = WebRtcSpl_DivW32W16( | 711 int temp_ratio = WebRtcSpl_DivW32W16( |
713 (slope - 8192) << 12, | 712 (slope - 8192) << 12, |
714 static_cast<int16_t>((distortion_lag * slope) >> 8)); | 713 static_cast<int16_t>((distortion_lag * slope) >> 8)); |
715 if (slope > 14746) { | 714 if (slope > 14746) { |
716 // slope > 1.8. | 715 // slope > 1.8. |
717 // Divide by 2, with proper rounding. | 716 // Divide by 2, with proper rounding. |
718 parameters.mute_slope = (temp_ratio + 1) / 2; | 717 parameters.mute_slope = (temp_ratio + 1) / 2; |
719 } else { | 718 } else { |
720 // Divide by 8, with proper rounding. | 719 // Divide by 8, with proper rounding. |
721 parameters.mute_slope = (temp_ratio + 4) / 8; | 720 parameters.mute_slope = (temp_ratio + 4) / 8; |
722 } | 721 } |
723 parameters.onset = true; | 722 parameters.onset = true; |
724 } else { | 723 } else { |
725 // Calculate (1 - slope) / distortion_lag. | 724 // Calculate (1 - slope) / distortion_lag. |
726 // Shift |slope| by 7 to Q20 before the division. The result is in Q20. | 725 // Shift |slope| by 7 to Q20 before the division. The result is in Q20. |
727 parameters.mute_slope = WebRtcSpl_DivW32W16( | 726 parameters.mute_slope = WebRtcSpl_DivW32W16( |
728 (8192 - slope) << 7, static_cast<int16_t>(distortion_lag)); | 727 (8192 - slope) << 7, static_cast<int16_t>(distortion_lag)); |
729 if (parameters.voice_mix_factor <= 13107) { | 728 if (parameters.voice_mix_factor <= 13107) { |
730 // Make sure the mute factor decreases from 1.0 to 0.9 in no more than | 729 // Make sure the mute factor decreases from 1.0 to 0.9 in no more than |
731 // 6.25 ms. | 730 // 6.25 ms. |
732 // mute_slope >= 0.005 / fs_mult in Q20. | 731 // mute_slope >= 0.005 / fs_mult in Q20. |
733 parameters.mute_slope = std::max(static_cast<int16_t>(5243 / fs_mult), | 732 parameters.mute_slope = std::max(5243 / fs_mult, parameters.mute_slope); |
734 parameters.mute_slope); | |
735 } else if (slope > 8028) { | 733 } else if (slope > 8028) { |
736 parameters.mute_slope = 0; | 734 parameters.mute_slope = 0; |
737 } | 735 } |
738 parameters.onset = false; | 736 parameters.onset = false; |
739 } | 737 } |
740 } | 738 } |
741 } | 739 } |
742 | 740 |
743 Expand::ChannelParameters::ChannelParameters() | 741 Expand::ChannelParameters::ChannelParameters() |
744 : mute_factor(16384), | 742 : mute_factor(16384), |
745 ar_gain(0), | 743 ar_gain(0), |
746 ar_gain_scale(0), | 744 ar_gain_scale(0), |
747 voice_mix_factor(0), | 745 voice_mix_factor(0), |
748 current_voice_mix_factor(0), | 746 current_voice_mix_factor(0), |
749 onset(false), | 747 onset(false), |
750 mute_slope(0) { | 748 mute_slope(0) { |
751 memset(ar_filter, 0, sizeof(ar_filter)); | 749 memset(ar_filter, 0, sizeof(ar_filter)); |
752 memset(ar_filter_state, 0, sizeof(ar_filter_state)); | 750 memset(ar_filter_state, 0, sizeof(ar_filter_state)); |
753 } | 751 } |
754 | 752 |
755 void Expand::Correlation(const int16_t* input, | 753 void Expand::Correlation(const int16_t* input, |
756 size_t input_length, | 754 size_t input_length, |
757 int16_t* output, | 755 int16_t* output, |
758 int16_t* output_scale) const { | 756 int* output_scale) const { |
759 // Set parameters depending on sample rate. | 757 // Set parameters depending on sample rate. |
760 const int16_t* filter_coefficients; | 758 const int16_t* filter_coefficients; |
761 int16_t num_coefficients; | 759 int16_t num_coefficients; |
762 int16_t downsampling_factor; | 760 int16_t downsampling_factor; |
763 if (fs_hz_ == 8000) { | 761 if (fs_hz_ == 8000) { |
764 num_coefficients = 3; | 762 num_coefficients = 3; |
765 downsampling_factor = 2; | 763 downsampling_factor = 2; |
766 filter_coefficients = DspHelper::kDownsample8kHzTbl; | 764 filter_coefficients = DspHelper::kDownsample8kHzTbl; |
767 } else if (fs_hz_ == 16000) { | 765 } else if (fs_hz_ == 16000) { |
768 num_coefficients = 5; | 766 num_coefficients = 5; |
(...skipping 68 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
837 RandomVector* random_vector, | 835 RandomVector* random_vector, |
838 int fs, | 836 int fs, |
839 size_t num_channels) const { | 837 size_t num_channels) const { |
840 return new Expand(background_noise, sync_buffer, random_vector, fs, | 838 return new Expand(background_noise, sync_buffer, random_vector, fs, |
841 num_channels); | 839 num_channels); |
842 } | 840 } |
843 | 841 |
844 // TODO(turajs): This can be moved to BackgroundNoise class. | 842 // TODO(turajs): This can be moved to BackgroundNoise class. |
845 void Expand::GenerateBackgroundNoise(int16_t* random_vector, | 843 void Expand::GenerateBackgroundNoise(int16_t* random_vector, |
846 size_t channel, | 844 size_t channel, |
847 int16_t mute_slope, | 845 int mute_slope, |
848 bool too_many_expands, | 846 bool too_many_expands, |
849 size_t num_noise_samples, | 847 size_t num_noise_samples, |
850 int16_t* buffer) { | 848 int16_t* buffer) { |
851 static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder; | 849 static const int kNoiseLpcOrder = BackgroundNoise::kMaxLpcOrder; |
852 int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125]; | 850 int16_t scaled_random_vector[kMaxSampleRate / 8000 * 125]; |
853 assert(num_noise_samples <= static_cast<size_t>(kMaxSampleRate / 8000 * 125)); | 851 assert(num_noise_samples <= static_cast<size_t>(kMaxSampleRate / 8000 * 125)); |
854 int16_t* noise_samples = &buffer[kNoiseLpcOrder]; | 852 int16_t* noise_samples = &buffer[kNoiseLpcOrder]; |
855 if (background_noise_->initialized()) { | 853 if (background_noise_->initialized()) { |
856 // Use background noise parameters. | 854 // Use background noise parameters. |
857 memcpy(noise_samples - kNoiseLpcOrder, | 855 memcpy(noise_samples - kNoiseLpcOrder, |
(...skipping 22 matching lines...) Expand all Loading... |
880 &(noise_samples[num_noise_samples - kNoiseLpcOrder]), | 878 &(noise_samples[num_noise_samples - kNoiseLpcOrder]), |
881 kNoiseLpcOrder); | 879 kNoiseLpcOrder); |
882 | 880 |
883 // Unmute the background noise. | 881 // Unmute the background noise. |
884 int16_t bgn_mute_factor = background_noise_->MuteFactor(channel); | 882 int16_t bgn_mute_factor = background_noise_->MuteFactor(channel); |
885 NetEq::BackgroundNoiseMode bgn_mode = background_noise_->mode(); | 883 NetEq::BackgroundNoiseMode bgn_mode = background_noise_->mode(); |
886 if (bgn_mode == NetEq::kBgnFade && too_many_expands && | 884 if (bgn_mode == NetEq::kBgnFade && too_many_expands && |
887 bgn_mute_factor > 0) { | 885 bgn_mute_factor > 0) { |
888 // Fade BGN to zero. | 886 // Fade BGN to zero. |
889 // Calculate muting slope, approximately -2^18 / fs_hz. | 887 // Calculate muting slope, approximately -2^18 / fs_hz. |
890 int16_t mute_slope; | 888 int mute_slope; |
891 if (fs_hz_ == 8000) { | 889 if (fs_hz_ == 8000) { |
892 mute_slope = -32; | 890 mute_slope = -32; |
893 } else if (fs_hz_ == 16000) { | 891 } else if (fs_hz_ == 16000) { |
894 mute_slope = -16; | 892 mute_slope = -16; |
895 } else if (fs_hz_ == 32000) { | 893 } else if (fs_hz_ == 32000) { |
896 mute_slope = -8; | 894 mute_slope = -8; |
897 } else { | 895 } else { |
898 mute_slope = -5; | 896 mute_slope = -5; |
899 } | 897 } |
900 // Use UnmuteSignal function with negative slope. | 898 // Use UnmuteSignal function with negative slope. |
(...skipping 39 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
940 const size_t kMaxRandSamples = RandomVector::kRandomTableSize; | 938 const size_t kMaxRandSamples = RandomVector::kRandomTableSize; |
941 while (samples_generated < length) { | 939 while (samples_generated < length) { |
942 size_t rand_length = std::min(length - samples_generated, kMaxRandSamples); | 940 size_t rand_length = std::min(length - samples_generated, kMaxRandSamples); |
943 random_vector_->IncreaseSeedIncrement(seed_increment); | 941 random_vector_->IncreaseSeedIncrement(seed_increment); |
944 random_vector_->Generate(rand_length, &random_vector[samples_generated]); | 942 random_vector_->Generate(rand_length, &random_vector[samples_generated]); |
945 samples_generated += rand_length; | 943 samples_generated += rand_length; |
946 } | 944 } |
947 } | 945 } |
948 | 946 |
949 } // namespace webrtc | 947 } // namespace webrtc |
OLD | NEW |