Index: webrtc/modules/audio_processing/aec/aec_core.cc
diff --git a/webrtc/modules/audio_processing/aec/aec_core.cc b/webrtc/modules/audio_processing/aec/aec_core.cc
index 8849ad787b6c0faefb6f4d1e1693f4e6a5be175b..1c8441d0ba923492cc42cf44ded685c0b7050e00 100644
--- a/webrtc/modules/audio_processing/aec/aec_core.cc
+++ b/webrtc/modules/audio_processing/aec/aec_core.cc
@@ -14,10 +14,6 @@
#include "webrtc/modules/audio_processing/aec/aec_core.h"
-#ifdef WEBRTC_AEC_DEBUG_DUMP
-#include <stdio.h>
-#endif
-
#include <algorithm>
#include <assert.h>
#include <math.h>
@@ -33,7 +29,6 @@ extern "C" {
#include "webrtc/modules/audio_processing/aec/aec_common.h"
#include "webrtc/modules/audio_processing/aec/aec_core_internal.h"
#include "webrtc/modules/audio_processing/aec/aec_rdft.h"
-#include "webrtc/modules/audio_processing/logging/aec_logging.h"
#include "webrtc/modules/audio_processing/utility/delay_estimator_wrapper.h"
#include "webrtc/system_wrappers/include/cpu_features_wrapper.h"
#include "webrtc/typedefs.h"
@@ -132,10 +127,6 @@ const float WebRtcAec_kNormalSmoothingCoefficients[2][2] = {{0.9f, 0.1f},
// Number of partitions forming the NLP's "preferred" bands.
enum { kPrefBandSize = 24 };
-#ifdef WEBRTC_AEC_DEBUG_DUMP
-extern int webrtc_aec_instance_count;
-#endif
-
WebRtcAecFilterFar WebRtcAec_FilterFar;
WebRtcAecScaleErrorSignal WebRtcAec_ScaleErrorSignal;
WebRtcAecFilterAdaptation WebRtcAec_FilterAdaptation;
@@ -206,7 +197,10 @@ void DivergentFilterFraction::Clear() {
}
// TODO(minyue): Moving some initialization from WebRtcAec_CreateAec() to ctor.
-AecCore::AecCore() = default;
+AecCore::AecCore(int instance_index)
+ : data_dumper(new ApmDataDumper(instance_index)) {}
+
+AecCore::~AecCore() {}
static int CmpFloat(const void* a, const void* b) {
const float* da = (const float*)a;
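The matching header-side change (in aec_core_internal.h) is not part of this file's diff. Presumably AecCore gains an explicit constructor taking the instance index, an out-of-line destructor, and an owned dumper member, roughly along these lines (a sketch under that assumption; the exact smart-pointer type is not visible in this hunk):

  // In struct AecCore, roughly (assumed, not shown in this CL excerpt):
  explicit AecCore(int instance_index);
  ~AecCore();
  std::unique_ptr<ApmDataDumper> data_dumper;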
@@ -1309,15 +1303,10 @@ static void ProcessBlock(AecCore* aec) {
WebRtc_ReadBuffer(aec->far_time_buf, reinterpret_cast<void**>(&farend_ptr),
farend, 1);
-#ifdef WEBRTC_AEC_DEBUG_DUMP
- {
- // TODO(minyue): |farend_ptr| starts from buffered samples. This will be
- // modified when |aec->far_time_buf| is revised.
- RTC_AEC_DEBUG_WAV_WRITE(aec->farFile, &farend_ptr[PART_LEN], PART_LEN);
-
- RTC_AEC_DEBUG_WAV_WRITE(aec->nearFile, nearend_ptr, PART_LEN);
- }
-#endif
+ aec->data_dumper->DumpWav("aec_far", PART_LEN, &farend_ptr[PART_LEN],
kwiberg-webrtc 2016/05/03 00:53:00:
At each of these call sites, the compiler is conve…

peah-webrtc 2016/05/03 06:29:22:
Great point!!!
Done.

+ aec->sampFreq > 16000 ? 16000 : aec->sampFreq, 1);
+ aec->data_dumper->DumpWav("aec_near", PART_LEN, nearend_ptr,
+ aec->sampFreq > 16000 ? 16000 : aec->sampFreq, 1);
kwiberg-webrtc 2016/05/03 00:53:01:
std::min(aec->sampFreq, 16000)
(Also in several p…

peah-webrtc 2016/05/03 06:29:22:
Done.

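For reference, a minimal sketch of how these dump calls would read with the std::min form suggested above (an illustration of the suggestion only, not the final patchset; it relies on nothing beyond the <algorithm> include and the ApmDataDumper::DumpWav call pattern already visible in this hunk):

  // Clamp the dump rate to the 16 kHz processing rate, as before, via std::min.
  aec->data_dumper->DumpWav("aec_far", PART_LEN, &farend_ptr[PART_LEN],
                            std::min(aec->sampFreq, 16000), 1);
  aec->data_dumper->DumpWav("aec_near", PART_LEN, nearend_ptr,
                            std::min(aec->sampFreq, 16000), 1);
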
if (aec->metricsMode == 1) {
// Update power levels
@@ -1417,7 +1406,8 @@ static void ProcessBlock(AecCore* aec) {
aec->xfBuf, nearend_ptr, aec->xPow, aec->wfBuf,
echo_subtractor_output);
- RTC_AEC_DEBUG_WAV_WRITE(aec->outLinearFile, echo_subtractor_output, PART_LEN);
+ aec->data_dumper->DumpWav("aec_out_linear", PART_LEN, echo_subtractor_output,
+ aec->sampFreq > 16000 ? 16000 : aec->sampFreq, 1);
if (aec->metricsMode == 1) {
UpdateLevel(&aec->linoutlevel,
@@ -1439,12 +1429,14 @@ static void ProcessBlock(AecCore* aec) {
WebRtc_WriteBuffer(aec->outFrBufH[i], outputH[i], PART_LEN);
}
- RTC_AEC_DEBUG_WAV_WRITE(aec->outFile, output, PART_LEN);
+ aec->data_dumper->DumpWav("aec_out", PART_LEN, output,
+ aec->sampFreq > 16000 ? 16000 : aec->sampFreq, 1);
}
-AecCore* WebRtcAec_CreateAec() {
+AecCore* WebRtcAec_CreateAec(int instance_count) {
int i;
- AecCore* aec = new AecCore;
+ AecCore* aec = new AecCore(instance_count);
+
if (!aec) {
return NULL;
}
@@ -1488,12 +1480,6 @@ AecCore* WebRtcAec_CreateAec() {
return NULL;
}
-#ifdef WEBRTC_AEC_DEBUG_DUMP
- aec->instance_index = webrtc_aec_instance_count;
-
- aec->farFile = aec->nearFile = aec->outFile = aec->outLinearFile = NULL;
- aec->debug_dump_count = 0;
-#endif
aec->delay_estimator_farend =
WebRtc_CreateDelayEstimatorFarend(PART_LEN1, kHistorySizeBlocks);
if (aec->delay_estimator_farend == NULL) {
@@ -1571,12 +1557,6 @@ void WebRtcAec_FreeAec(AecCore* aec) {
WebRtc_FreeBuffer(aec->far_time_buf);
- RTC_AEC_DEBUG_WAV_CLOSE(aec->farFile);
- RTC_AEC_DEBUG_WAV_CLOSE(aec->nearFile);
- RTC_AEC_DEBUG_WAV_CLOSE(aec->outFile);
- RTC_AEC_DEBUG_WAV_CLOSE(aec->outLinearFile);
- RTC_AEC_DEBUG_RAW_CLOSE(aec->e_fft_file);
-
WebRtc_FreeDelayEstimator(aec->delay_estimator);
WebRtc_FreeDelayEstimatorFarend(aec->delay_estimator_farend);
@@ -1621,6 +1601,7 @@ static void SetErrorThreshold(AecCore* aec) {
int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
int i;
+ aec->data_dumper->InitiateNewSetOfRecordings();
aec->sampFreq = sampFreq;
@@ -1643,27 +1624,6 @@ int WebRtcAec_InitAec(AecCore* aec, int sampFreq) {
// Initialize far-end buffers.
WebRtc_InitBuffer(aec->far_time_buf);
-#ifdef WEBRTC_AEC_DEBUG_DUMP
- {
- int process_rate = sampFreq > 16000 ? 16000 : sampFreq;
- RTC_AEC_DEBUG_WAV_REOPEN("aec_far", aec->instance_index,
- aec->debug_dump_count, process_rate,
- &aec->farFile);
- RTC_AEC_DEBUG_WAV_REOPEN("aec_near", aec->instance_index,
- aec->debug_dump_count, process_rate,
- &aec->nearFile);
- RTC_AEC_DEBUG_WAV_REOPEN("aec_out", aec->instance_index,
- aec->debug_dump_count, process_rate,
- &aec->outFile);
- RTC_AEC_DEBUG_WAV_REOPEN("aec_out_linear", aec->instance_index,
- aec->debug_dump_count, process_rate,
- &aec->outLinearFile);
- }
-
- RTC_AEC_DEBUG_RAW_OPEN("aec_e_fft", aec->debug_dump_count, &aec->e_fft_file);
-
- ++aec->debug_dump_count;
-#endif
aec->system_delay = 0;
if (WebRtc_InitDelayEstimatorFarend(aec->delay_estimator_farend) != 0) {