Index: webrtc/modules/audio_processing/include/aec_dump.h
diff --git a/webrtc/modules/audio_processing/include/aec_dump.h b/webrtc/modules/audio_processing/include/aec_dump.h
new file mode 100644
index 0000000000000000000000000000000000000000..68d50d0490502ec3da6ccebf99c93cc736ba6d92
--- /dev/null
+++ b/webrtc/modules/audio_processing/include/aec_dump.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved.
+ *
+ * Use of this source code is governed by a BSD-style license
+ * that can be found in the LICENSE file in the root of the source
+ * tree. An additional intellectual property rights grant can be found
+ * in the file PATENTS. All contributing project authors may
+ * be found in the AUTHORS file in the root of the source tree.
+ */
+
+#ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
+#define WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
+
+#include <memory>
+#include <string>
+#include <vector>
+
+#include "webrtc/base/array_view.h" |
+
+namespace webrtc {
+
+class AudioFrame;
+
+// Struct for passing current config from APM without having to
+// include protobuf headers.
+struct InternalAPMConfig {
+  InternalAPMConfig();
+  InternalAPMConfig(const InternalAPMConfig&);
+  InternalAPMConfig(InternalAPMConfig&&);
+
+  InternalAPMConfig& operator=(const InternalAPMConfig&) = delete;
+  InternalAPMConfig& operator=(InternalAPMConfig&&) = delete;
+
+  bool aec_enabled = false;
+  bool aec_delay_agnostic_enabled = false;
+  bool aec_drift_compensation_enabled = false;
+  bool aec_extended_filter_enabled = false;
+  int aec_suppression_level = 0;
+  bool aecm_enabled = false;
+  bool aecm_comfort_noise_enabled = false;
+  int aecm_routing_mode = 0;
+  bool agc_enabled = false;
+  int agc_mode = 0;
+  bool agc_limiter_enabled = false;
+  bool hpf_enabled = false;
+  bool ns_enabled = false;
+  int ns_level = 0;
+  bool transient_suppression_enabled = false;
+  bool intelligibility_enhancer_enabled = false;
+  bool noise_robust_agc_enabled = false;
+  std::string experiments_description = "";
+};
+
+struct InternalAPMStreamsConfig {
+  int input_sample_rate = 0;
+  int output_sample_rate = 0;
+  int render_input_sample_rate = 0;
+  int render_output_sample_rate = 0;
+
+  size_t input_num_channels = 0;
+  size_t output_num_channels = 0;
+  size_t render_input_num_channels = 0;
+  size_t render_output_num_channels = 0;
+};
+
+// Class to pass audio data in float** format. This is to avoid
+// dependence on AudioBuffer, and avoid problems associated with
+// rtc::ArrayView<rtc::ArrayView>.
+class FloatAudioFrame {
+ public:
+  // |num_channels| and |channel_size| describe the float**
+  // |audio_samples|, which is assumed to point to |num_channels|
+  // channel pointers of |channel_size| float samples each.
+  FloatAudioFrame(const float* const* audio_samples,
+                  size_t num_channels,
+                  size_t channel_size)
+      : audio_samples_(audio_samples),
+        num_channels_(num_channels),
+        channel_size_(channel_size) {}
+
+  size_t num_channels() const { return num_channels_; }
+
+  rtc::ArrayView<const float> channel(size_t idx) const {
+    RTC_DCHECK_LT(idx, num_channels_);
+    return rtc::ArrayView<const float>(audio_samples_[idx], channel_size_);
+  }
+
+ private:
+  const float* const* audio_samples_;
+  size_t num_channels_;
+  size_t channel_size_;
+};
+
+// An interface for recording configuration and input/output streams
+// of the Audio Processing Module. The recordings are called
+// 'aec-dumps' and are stored in a protobuf format defined in
+// debug.proto.
+class AecDump {
+ public:
+  // A capture stream frame is logged before and after processing in
+  // the same protobuf message. To facilitate that, a CaptureStreamInfo
+  // instance is first filled with the input and then with the output.
+  //
+  // To log an input/output pair, first call
+  // AecDump::GetCaptureStreamInfo. Add the input and output via the
+  // returned CaptureStreamInfo pointer. Then call
+  // AecDump::WriteCaptureStreamMessage.
+  class CaptureStreamInfo {
+   public:
+    virtual ~CaptureStreamInfo() = default;
+    virtual void AddInput(const FloatAudioFrame& src) = 0;
+    virtual void AddOutput(const FloatAudioFrame& src) = 0;
+
+    virtual void AddInput(const AudioFrame& frame) = 0;
+    virtual void AddOutput(const AudioFrame& frame) = 0;
+
+    virtual void set_delay(int delay) = 0;
+    virtual void set_drift(int drift) = 0;
+    virtual void set_level(int level) = 0;
+    virtual void set_keypress(bool keypress) = 0;
+  };
+
+  virtual ~AecDump() = default;
+
+  virtual CaptureStreamInfo* GetCaptureStreamInfo() = 0;
+
+  // For all implementing subclasses, the Write* methods are safe to
+  // call concurrently or otherwise. The intended mode of operation is
+  // to build a protobuf message from the input and hand it off to be
+  // written to file asynchronously.
+  virtual void WriteInitMessage(
+      const InternalAPMStreamsConfig& streams_config) = 0;
+
+  virtual void WriteRenderStreamMessage(const AudioFrame& frame) = 0;
+
+  virtual void WriteRenderStreamMessage(const FloatAudioFrame& src) = 0;
+
+  virtual void WriteCaptureStreamMessage() = 0;
+
+  // If not |forced|, writes the current config only if it differs from
+  // the last saved one; if |forced|, writes the config regardless of
+  // what was last saved.
+  virtual void WriteConfig(const InternalAPMConfig& config, bool forced) = 0;
+};
+}  // namespace webrtc
+
+#endif  // WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_
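
For reference, a minimal usage sketch of the FloatAudioFrame wrapper defined above. It is not part of the patch; the buffer names and sizes are illustrative. Note that the frame only borrows the channel pointers, so the caller keeps ownership of the audio data.

#include <cstddef>
#include <vector>

#include "webrtc/modules/audio_processing/include/aec_dump.h"

// Wrap two caller-owned channels and read one of them back. The
// wrapped vectors must outlive the FloatAudioFrame.
void FloatAudioFrameSketch() {
  const size_t kChannelSize = 480;  // Illustrative: 10 ms at 48 kHz.
  std::vector<float> left(kChannelSize, 0.f);
  std::vector<float> right(kChannelSize, 0.f);
  const float* channels[] = {left.data(), right.data()};

  webrtc::FloatAudioFrame frame(channels, /*num_channels=*/2,
                                /*channel_size=*/kChannelSize);

  // channel() returns a view into the caller-owned data; nothing is
  // copied. first_channel[0] reads left[0].
  rtc::ArrayView<const float> first_channel = frame.channel(0);
  static_cast<void>(first_channel);
}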
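Likewise, a sketch of the capture-stream logging sequence that the CaptureStreamInfo comment describes. AecDump is abstract, so |dump| stands for some concrete implementation supplied elsewhere (none is defined in this header); the function name and the metadata values are placeholders.

#include "webrtc/modules/audio_processing/include/aec_dump.h"

// One capture frame: fill a CaptureStreamInfo with the input and
// output, set the stream metadata, then write the combined message.
void LogOneCaptureFrame(webrtc::AecDump* dump,
                        const webrtc::FloatAudioFrame& input,
                        const webrtc::FloatAudioFrame& output) {
  webrtc::AecDump::CaptureStreamInfo* info = dump->GetCaptureStreamInfo();

  info->AddInput(input);    // Capture audio before processing.
  info->AddOutput(output);  // The same frame after processing.
  info->set_delay(0);       // Placeholder stream metadata values.
  info->set_drift(0);
  info->set_level(0);
  info->set_keypress(false);

  // Packs the collected input/output pair into one protobuf message
  // and hands it off for asynchronous writing.
  dump->WriteCaptureStreamMessage();
}

Per the interface above, WriteInitMessage and WriteConfig would be called separately, when the stream formats or the APM configuration change rather than per frame.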