Index: webrtc/modules/audio_processing/noise_suppression_impl.cc
diff --git a/webrtc/modules/audio_processing/noise_suppression_impl.cc b/webrtc/modules/audio_processing/noise_suppression_impl.cc
index 15019112647efa1b02a15d65401b5499f21e670d..6f9b4e3c9618640f7d072fef3bfe15d81f3f37d3 100644
--- a/webrtc/modules/audio_processing/noise_suppression_impl.cc
+++ b/webrtc/modules/audio_processing/noise_suppression_impl.cc
@@ -63,7 +63,7 @@ int NoiseSuppressionImpl::AnalyzeCaptureAudio(AudioBuffer* audio) {
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == num_handles());

-  for (int i = 0; i < num_handles(); ++i) {
+  for (size_t i = 0; i < num_handles(); ++i) {
     Handle* my_handle = static_cast<Handle*>(handle(i));

     WebRtcNs_Analyze(my_handle, audio->split_bands_const_f(i)[kBand0To8kHz]);
@@ -79,7 +79,7 @@ int NoiseSuppressionImpl::ProcessCaptureAudio(AudioBuffer* audio) {
   assert(audio->num_frames_per_band() <= 160);
   assert(audio->num_channels() == num_handles());

-  for (int i = 0; i < num_handles(); ++i) {
+  for (size_t i = 0; i < num_handles(); ++i) {
     Handle* my_handle = static_cast<Handle*>(handle(i));
 #if defined(WEBRTC_NS_FLOAT)
     WebRtcNs_Process(my_handle,
@@ -122,7 +122,7 @@ NoiseSuppression::Level NoiseSuppressionImpl::level() const {
 float NoiseSuppressionImpl::speech_probability() const {
 #if defined(WEBRTC_NS_FLOAT)
   float probability_average = 0.0f;
-  for (int i = 0; i < num_handles(); i++) {
+  for (size_t i = 0; i < num_handles(); i++) {
     Handle* my_handle = static_cast<Handle*>(handle(i));
     probability_average += WebRtcNs_prior_speech_probability(my_handle);
   }
@@ -169,7 +169,7 @@ int NoiseSuppressionImpl::ConfigureHandle(void* handle) const {
 #endif
 }

-int NoiseSuppressionImpl::num_handles_required() const {
+size_t NoiseSuppressionImpl::num_handles_required() const {
   return apm_->num_output_channels();
 }
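
Note: the sketch below is not part of the patch. It is a minimal standalone illustration, using a hypothetical Processor type rather than the WebRTC classes, of why the loop index type follows num_handles() to size_t: once the count accessor returns size_t, an int index compared against it triggers a signed/unsigned comparison warning under -Wall -Wsign-compare, while a size_t index does not.

#include <cstddef>

// Hypothetical stand-in for a class whose handle/channel count accessor
// has been migrated from int to size_t (as num_handles() is in the patch).
struct Processor {
  size_t num_handles() const { return handles_; }
  size_t handles_ = 2;
};

int main() {
  Processor p;
  // for (int i = 0; i < p.num_handles(); ++i) {}  // warns: comparison between
  //                                               // signed int and size_t
  for (size_t i = 0; i < p.num_handles(); ++i) {   // index type matches accessor
    (void)i;  // loop body elided
  }
  return 0;
}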