| Index: webrtc/modules/audio_processing/include/audio_processing.h
|
| diff --git a/webrtc/modules/audio_processing/include/audio_processing.h b/webrtc/modules/audio_processing/include/audio_processing.h
|
| index 6fa1c96c0771c14d141836dc0c88bf69ec9f5aea..800aa1917538f7d83cbf921d6658f4000f31b97a 100644
|
| --- a/webrtc/modules/audio_processing/include/audio_processing.h
|
| +++ b/webrtc/modules/audio_processing/include/audio_processing.h
|
| @@ -29,6 +29,9 @@ class AudioFrame;
|
| template<typename T>
|
| class Beamformer;
|
|
|
| +class StreamConfig;
|
| +struct ProcessingConfig;
|
| +
|
| class EchoCancellation;
|
| class EchoControlMobile;
|
| class GainControl;
|
| @@ -84,7 +87,7 @@ static const int kAgcStartupMinVolume = 0;
|
| #endif // defined(WEBRTC_CHROMIUM_BUILD)
|
| struct ExperimentalAgc {
|
| ExperimentalAgc() : enabled(true), startup_min_volume(kAgcStartupMinVolume) {}
|
| - ExperimentalAgc(bool enabled)
|
| + explicit ExperimentalAgc(bool enabled)
|
| : enabled(enabled), startup_min_volume(kAgcStartupMinVolume) {}
|
| ExperimentalAgc(bool enabled, int startup_min_volume)
|
| : enabled(enabled), startup_min_volume(startup_min_volume) {}
|
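| Making the one-argument constructor explicit keeps a bare bool from silently
| converting into an ExperimentalAgc. A minimal sketch of the effect, assuming
| the usual webrtc::Config::Set<ExperimentalAgc>() pattern for handing the
| struct to AudioProcessing::Create():
|
|   webrtc::Config config;
|   // Still fine: the construction is written out.
|   config.Set<ExperimentalAgc>(new webrtc::ExperimentalAgc(false));
|   // No longer compiles once the constructor is explicit:
|   // webrtc::ExperimentalAgc agc = false;
|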
| @@ -236,10 +239,15 @@ class AudioProcessing {
|
| // The int16 interfaces require:
|
| // - only |NativeRate|s be used
|
| // - that the input, output and reverse rates must match
|
| - // - that |output_layout| matches |input_layout|
|
| + // - that |processing_config.output_stream()| matches
|
| + // |processing_config.input_stream()|.
|
| //
|
| - // The float interfaces accept arbitrary rates and support differing input
|
| - // and output layouts, but the output may only remove channels, not add.
|
| + // The float interfaces accept arbitrary rates and support differing input and
|
| + // output layouts, but the output must have either one channel or the same
|
| + // number of channels as the input.
|
| + virtual int Initialize(const ProcessingConfig& processing_config) = 0;
|
| +
|
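| A minimal usage sketch for the packed overload; the rates and channel counts
| are illustrative only, and the error check assumes the existing
| AudioProcessing::kNoError code and AudioProcessing::Create() factory:
|
|   // The caller owns |apm| in this sketch.
|   webrtc::AudioProcessing* apm = webrtc::AudioProcessing::Create();
|   webrtc::ProcessingConfig processing_config;
|   processing_config.input_stream().set_sample_rate_hz(48000);
|   processing_config.input_stream().set_num_channels(2);
|   processing_config.output_stream().set_sample_rate_hz(48000);
|   processing_config.output_stream().set_num_channels(1);  // Downmix to mono.
|   processing_config.reverse_stream().set_sample_rate_hz(48000);
|   processing_config.reverse_stream().set_num_channels(2);
|   if (apm->Initialize(processing_config) != webrtc::AudioProcessing::kNoError) {
|     // Handle the initialization failure.
|   }
|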
| + // Initialize with unpacked parameters. See Initialize() above for details.
|
| virtual int Initialize(int input_sample_rate_hz,
|
| int output_sample_rate_hz,
|
| int reverse_sample_rate_hz,
|
| @@ -292,8 +300,8 @@ class AudioProcessing {
|
| // |input_layout|. At output, the channels will be arranged according to
|
| // |output_layout| at |output_sample_rate_hz| in |dest|.
|
| //
|
| - // The output layout may only remove channels, not add. |src| and |dest|
|
| - // may use the same memory, if desired.
|
| + // The output layout must have one channel or as many channels as the input.
|
| + // |src| and |dest| may use the same memory, if desired.
|
| virtual int ProcessStream(const float* const* src,
|
| int samples_per_channel,
|
| int input_sample_rate_hz,
|
| @@ -302,6 +310,17 @@ class AudioProcessing {
|
| ChannelLayout output_layout,
|
| float* const* dest) = 0;
|
|
|
| + // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
|
| + // |src| points to a channel buffer, arranged according to
|
| + // |processing_config.input_stream()|. At output, the channels will be
|
| + // arranged according to |processing_config.output_stream()| in |dest|.
|
| + //
|
| + // The output must have one channel or as many channels as the input. |src|
|
| + // and |dest| may use the same memory, if desired.
|
| + virtual int ProcessStream(const float* const* src,
|
| + const ProcessingConfig& processing_config,
|
| + float* const* dest) = 0;
|
| +
|
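| For illustration, a stereo-in, mono-out call through this overload, reusing
| |apm| and |processing_config| from the Initialize() sketch above (buffer
| length follows the 10 ms chunk convention; the output may have one channel or
| as many channels as the input, so 2 -> 1 is allowed):
|
|   const int kSamplesPerChannel = 480;  // 10 ms at 48 kHz.
|   float left[kSamplesPerChannel] = {0.f};
|   float right[kSamplesPerChannel] = {0.f};
|   float mono_out[kSamplesPerChannel] = {0.f};
|   const float* const src[] = {left, right};  // Matches input_stream(): 2 ch.
|   float* const dest[] = {mono_out};  // Matches output_stream(): 1 ch.
|   if (apm->ProcessStream(src, processing_config, dest) !=
|       webrtc::AudioProcessing::kNoError) {
|     // Handle the processing error.
|   }
|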
| // Analyzes a 10 ms |frame| of the reverse direction audio stream. The frame
|
| // will not be modified. On the client-side, this is the far-end (or to be
|
| // rendered) audio.
|
| @@ -326,6 +345,11 @@ class AudioProcessing {
|
| int sample_rate_hz,
|
| ChannelLayout layout) = 0;
|
|
|
| + // Accepts deinterleaved float audio with the range [-1, 1]. Each element of
|
| + // |data| points to a channel buffer, arranged according to |reverse_config|.
|
| + virtual int AnalyzeReverseStream(const float* const* data,
|
| + const StreamConfig& reverse_config) = 0;
|
| +
|
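| A matching sketch for the reverse stream, again with 10 ms of 48 kHz stereo
| far-end audio and the |apm| instance from the sketches above:
|
|   float far_left[480] = {0.f};  // 480 samples = 10 ms at 48 kHz.
|   float far_right[480] = {0.f};
|   const float* const far_end[] = {far_left, far_right};
|   webrtc::StreamConfig reverse_config(48000 /* sample_rate_hz */,
|                                       2 /* num_channels */);
|   if (apm->AnalyzeReverseStream(far_end, reverse_config) !=
|       webrtc::AudioProcessing::kNoError) {
|     // Handle the error.
|   }
|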
| // This must be called if and only if echo processing is enabled.
|
| //
|
| // Sets the |delay| in ms between AnalyzeReverseStream() receiving a far-end
|
| @@ -432,6 +456,91 @@ class AudioProcessing {
|
| static const int kChunkSizeMs = 10;
|
| };
|
|
|
| +class StreamConfig {
|
| + public:
|
| + StreamConfig(int sample_rate_hz = 0,
|
| + int num_channels = 0,
|
| + bool has_keyboard = false)
|
| + : sample_rate_hz_(sample_rate_hz),
|
| + num_channels_(num_channels),
|
| + has_keyboard_(has_keyboard),
|
| + samples_per_channel_(calculate_samples_per_channel(sample_rate_hz)) {}
|
| +
|
| + void set_sample_rate_hz(int value) {
|
| + sample_rate_hz_ = value;
|
| + samples_per_channel_ = calculate_samples_per_channel(value);
|
| + }
|
| + void set_num_channels(int value) { num_channels_ = value; }
|
| + void set_has_keyboard(bool value) { has_keyboard_ = value; }
|
| +
|
| + int sample_rate_hz() const { return sample_rate_hz_; }
|
| + int num_channels() const { return num_channels_; }
|
| + bool has_keyboard() const { return has_keyboard_; }
|
| + int samples_per_channel() const { return samples_per_channel_; }
|
| +
|
| + bool operator==(const StreamConfig& other) const {
|
| + return sample_rate_hz_ == other.sample_rate_hz_ &&
|
| + num_channels_ == other.num_channels_ &&
|
| + has_keyboard_ == other.has_keyboard_;
|
| + }
|
| +
|
| + bool operator!=(const StreamConfig& other) const { return !(*this == other); }
|
| +
|
| + private:
|
| + static int calculate_samples_per_channel(int sample_rate_hz) {
|
| + return static_cast<int>(sample_rate_hz * (AudioProcessing::kChunkSizeMs / 1000.0));
|
| + }
|
| +
|
| + int sample_rate_hz_;
|
| + int num_channels_;
|
| + bool has_keyboard_;
|
| + int samples_per_channel_;
|
| +};
|
| +
|
| +struct ProcessingConfig {
|
| + enum StreamName {
|
| + kInputStream,
|
| + kOutputStream,
|
| + kReverseStream,
|
| + kNumStreamNames,
|
| + };
|
| +
|
| + const StreamConfig& input_stream() const {
|
| + return streams[StreamName::kInputStream];
|
| + }
|
| + const StreamConfig& output_stream() const {
|
| + return streams[StreamName::kOutputStream];
|
| + }
|
| + const StreamConfig& reverse_stream() const {
|
| + return streams[StreamName::kReverseStream];
|
| + }
|
| +
|
| + StreamConfig& input_stream() {
|
| + return streams[StreamName::kInputStream];
|
| + }
|
| + StreamConfig& output_stream() {
|
| + return streams[StreamName::kOutputStream];
|
| + }
|
| + StreamConfig& reverse_stream() {
|
| + return streams[StreamName::kReverseStream];
|
| + }
|
| +
|
| + bool operator==(const ProcessingConfig& other) const {
|
| + for (int i = 0; i < StreamName::kNumStreamNames; ++i) {
|
| + if (this->streams[i] != other.streams[i]) {
|
| + return false;
|
| + }
|
| + }
|
| + return true;
|
| + }
|
| +
|
| + bool operator!=(const ProcessingConfig& other) const {
|
| + return !(*this == other);
|
| + }
|
| +
|
| + StreamConfig streams[StreamName::kNumStreamNames];
|
| +};
|
| +
|
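| A small, self-contained sketch of the value semantics above: the sample count
| is derived from AudioProcessing::kChunkSizeMs, and samples_per_channel_ is
| not part of operator==:
|
|   #include <cassert>
|
|   #include "webrtc/modules/audio_processing/include/audio_processing.h"
|
|   int main() {
|     webrtc::StreamConfig config(48000, 2, false /* has_keyboard */);
|     assert(config.samples_per_channel() == 480);  // 10 ms at 48 kHz.
|
|     webrtc::StreamConfig other(48000, 2);
|     assert(config == other);  // samples_per_channel_ is derived, not compared.
|
|     webrtc::ProcessingConfig processing;
|     processing.input_stream() = config;  // StreamConfig is copy-assignable.
|     assert(processing.input_stream() != processing.output_stream());
|     return 0;
|   }
|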
| // The acoustic echo cancellation (AEC) component provides better performance
|
| // than AECM but also requires more processing power and is dependent on delay
|
| // stability and reporting accuracy. As such it is well-suited and recommended
|
|
|