OLD | NEW |
---|---|
(Empty) | |
1 /* | |
2 * Copyright (c) 2017 The WebRTC project authors. All Rights Reserved. | |
3 * | |
4 * Use of this source code is governed by a BSD-style license | |
5 * that can be found in the LICENSE file in the root of the source | |
6 * tree. An additional intellectual property rights grant can be found | |
7 * in the file PATENTS. All contributing project authors may | |
8 * be found in the AUTHORS file in the root of the source tree. | |
9 */ | |
10 | |
11 #ifndef WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_ | |
12 #define WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_ | |
13 | |
#include <memory>
#include <string>
#include <vector>

#include "webrtc/base/array_view.h"
#include "webrtc/base/checks.h"
19 | |
20 namespace webrtc { | |
21 | |
22 class AudioFrame; | |
23 | |
24 // Struct for passing current config from APM without having to | |
25 // include protobuf headers. | |
struct InternalAPMConfig {
  InternalAPMConfig();
  InternalAPMConfig(const InternalAPMConfig&);
  InternalAPMConfig(InternalAPMConfig&&);

  // Copy- and move-assignment are intentionally disallowed.
  // NOTE: the move-assignment parameter was previously declared as
  // 'const InternalAPMConfig&&'; a const rvalue reference still counts as a
  // (deleted) move-assignment operator, but the non-const form below is the
  // idiomatic spelling of the same intent.
  InternalAPMConfig& operator=(const InternalAPMConfig&) = delete;
  InternalAPMConfig& operator=(InternalAPMConfig&&) = delete;

  // Echo cancellation (AEC) settings.
  bool aec_enabled = false;
  bool aec_delay_agnostic_enabled = false;
  bool aec_drift_compensation_enabled = false;
  bool aec_extended_filter_enabled = false;
  int aec_suppression_level = 0;

  // Mobile echo control (AECM) settings.
  bool aecm_enabled = false;
  bool aecm_comfort_noise_enabled = false;
  int aecm_routing_mode = 0;

  // Automatic gain control (AGC) settings.
  bool agc_enabled = false;
  int agc_mode = 0;
  bool agc_limiter_enabled = false;

  // Other processing components.
  bool hpf_enabled = false;  // High-pass filter.
  bool ns_enabled = false;   // Noise suppression.
  int ns_level = 0;
  bool transient_suppression_enabled = false;
  bool intelligibility_enhancer_enabled = false;
  bool noise_robust_agc_enabled = false;

  // Free-form description of active experiments, stored with the dump.
  std::string experiments_description = "";
};
53 | |
// Snapshot of the stream layout (rates and channel counts, presumably in Hz
// and channels — matches the parameters APM is initialized with) for the four
// audio streams handled by the Audio Processing Module: capture input/output
// and render input/output. Passed to AecDump::WriteInitMessage so the dump
// can record the layout without including protobuf headers.
struct InternalAPMStreamsConfig {
  int input_sample_rate = 0;          // Capture-side input rate.
  int output_sample_rate = 0;         // Capture-side output rate.
  int render_input_sample_rate = 0;   // Render-side input rate.
  int render_output_sample_rate = 0;  // Render-side output rate.

  size_t input_num_channels = 0;          // Capture-side input channels.
  size_t output_num_channels = 0;         // Capture-side output channels.
  size_t render_input_num_channels = 0;   // Render-side input channels.
  size_t render_output_num_channels = 0;  // Render-side output channels.
};
65 | |
66 // Class to pass audio data in float** format. This is to avoid | |
peah-webrtc
2017/05/15 05:32:51
Since FloatAudioFrame is defined and used only wit
peah-webrtc
2017/05/15 07:25:18
I now saw your comment about this in the upcoming
aleloi
2017/05/15 13:20:52
Acknowledged.
| |
67 // dependence on AudioBuffer, and avoid problems associated with | |
68 // rtc::ArrayView<rtc::ArrayView>. | |
69 class FloatAudioFrame { | |
70 public: | |
71 // |num_channels| and |channel_size| describe the float** | |
72 // |audio_samples|. |audio_samples| is assumed to point to a | |
73 // two-dimensional |num_channels * channel_size| array of floats. | |
74 FloatAudioFrame(const float* const* audio_samples, | |
75 size_t num_channels, | |
76 size_t channel_size) | |
77 : audio_samples_(audio_samples), | |
78 num_channels_(num_channels), | |
79 channel_size_(channel_size) {} | |
80 | |
81 size_t num_channels() const { return num_channels_; } | |
82 | |
83 rtc::ArrayView<const float> channel(size_t idx) const { | |
84 RTC_DCHECK_LE(0, idx); | |
85 RTC_DCHECK_LE(idx, num_channels_); | |
86 return rtc::ArrayView<const float>(audio_samples_[idx], channel_size_); | |
87 } | |
88 | |
89 private: | |
90 const float* const* audio_samples_; | |
91 size_t num_channels_; | |
92 size_t channel_size_; | |
93 }; | |
94 | |
95 // An interface for recording configuration and input/output streams | |
96 // of the Audio Processing Module. The recordings are called | |
97 // 'aec-dumps' and are stored in a protobuf format defined in | |
98 // debug.proto. | |
class AecDump {
 public:
  // A capture stream frame is logged before and after processing in
  // the same protobuf message. To facilitate that, a CaptureStreamInfo
  // instance is first filled with Input, then Output.
  //
  // To log an input/output pair, first call
  // AecDump::GetCaptureStreamInfo. Add the input and output to the
  // returned CaptureStreamInfo pointer. Then call
  // AecDump::WriteCaptureStreamMessage.
  class CaptureStreamInfo {
   public:
    virtual ~CaptureStreamInfo() = default;
    // Float (deinterleaved float**) variants of the pre-/post-processing
    // capture audio.
    virtual void AddInput(const FloatAudioFrame& src) = 0;
    virtual void AddOutput(const FloatAudioFrame& src) = 0;

    // Integer AudioFrame variants of the same.
    virtual void AddInput(const AudioFrame& frame) = 0;
    virtual void AddOutput(const AudioFrame& frame) = 0;

    // Per-frame metadata recorded alongside the audio. Units/semantics are
    // defined by the implementation and debug.proto — presumably the same
    // delay/drift/level/keypress fields APM's ProcessStream uses; confirm
    // against the implementing subclass.
    virtual void set_delay(int delay) = 0;
    virtual void set_drift(int drift) = 0;
    virtual void set_level(int level) = 0;
    virtual void set_keypress(bool keypress) = 0;
  };

  virtual ~AecDump() = default;

  // Returns the CaptureStreamInfo currently being assembled. Ownership is
  // retained by the AecDump implementation (raw, non-owning pointer).
  virtual CaptureStreamInfo* GetCaptureStreamInfo() = 0;

  // The Write* methods are always safe to call concurrently or
  // otherwise for all implementing subclasses. The intended mode of
  // operation is to create a protobuf object from the input, and send
  // it away to be written to file asynchronously.
  virtual void WriteInitMessage(
      const InternalAPMStreamsConfig& streams_config) = 0;

  // Logs one render-stream frame; integer and float overloads mirror
  // CaptureStreamInfo::AddInput/AddOutput.
  virtual void WriteRenderStreamMessage(const AudioFrame& frame) = 0;

  virtual void WriteRenderStreamMessage(const FloatAudioFrame& src) = 0;

  // Flushes the input/output pair accumulated via GetCaptureStreamInfo()
  // into a single capture-stream message.
  virtual void WriteCaptureStreamMessage() = 0;

  // If not |forced|, only writes the current config if it is
  // different from the last saved one; if |forced|, writes the config
  // regardless of the last saved.
  virtual void WriteConfig(const InternalAPMConfig& config, bool forced) = 0;
};
146 } // namespace webrtc | |
147 | |
148 #endif // WEBRTC_MODULES_AUDIO_PROCESSING_INCLUDE_AEC_DUMP_H_ | |
OLD | NEW |