Index: webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index d7203b9da3ea8b5adb9f1998d285cd0320fbce4d..57d95947c356a9257797be5de9615a9cce0f5970 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -91,13 +91,16 @@ int AudioEncoderG722::GetTargetBitrate() const {
   return static_cast<int>(64000 * NumChannels());
 }
 
+void AudioEncoderG722::Reset() {
+  num_10ms_frames_buffered_ = 0;
+  for (size_t i = 0; i < num_channels_; ++i)
+    RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
+}
+
 AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
     uint32_t rtp_timestamp,
     rtc::ArrayView<const int16_t> audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
-  RTC_CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
-
+    rtc::Buffer* encoded) {
   if (num_10ms_frames_buffered_ == 0)
     first_timestamp_in_buffer_ = rtp_timestamp;
 
@@ -117,38 +120,38 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
   num_10ms_frames_buffered_ = 0;
   const size_t samples_per_channel = SamplesPerChannel();
   for (size_t i = 0; i < num_channels_; ++i) {
-    const size_t encoded = WebRtcG722_Encode(
+    const size_t bytes_encoded = WebRtcG722_Encode(
         encoders_[i].encoder, encoders_[i].speech_buffer.get(),
         samples_per_channel, encoders_[i].encoded_buffer.data());
-    RTC_CHECK_EQ(encoded, samples_per_channel / 2);
+    RTC_CHECK_EQ(bytes_encoded, samples_per_channel / 2);
   }
 
-  // Interleave the encoded bytes of the different channels. Each separate
-  // channel and the interleaved stream encodes two samples per byte, most
-  // significant half first.
-  for (size_t i = 0; i < samples_per_channel / 2; ++i) {
-    for (size_t j = 0; j < num_channels_; ++j) {
-      uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
-      interleave_buffer_.data()[j] = two_samples >> 4;
-      interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
-    }
-    for (size_t j = 0; j < num_channels_; ++j)
-      encoded[i * num_channels_ + j] = interleave_buffer_.data()[2 * j] << 4 |
-                                       interleave_buffer_.data()[2 * j + 1];
-  }
+  const size_t bytes_to_encode = samples_per_channel / 2 * num_channels_;
   EncodedInfo info;
-  info.encoded_bytes = samples_per_channel / 2 * num_channels_;
+  info.encoded_bytes = encoded->AppendData(
+      bytes_to_encode, [&] (rtc::ArrayView<uint8_t> encoded) {
+        // Interleave the encoded bytes of the different channels. Each separate
+        // channel and the interleaved stream encodes two samples per byte, most
+        // significant half first.
+        for (size_t i = 0; i < samples_per_channel / 2; ++i) {
+          for (size_t j = 0; j < num_channels_; ++j) {
+            uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
+            interleave_buffer_.data()[j] = two_samples >> 4;
+            interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
+          }
+          for (size_t j = 0; j < num_channels_; ++j)
+            encoded[i * num_channels_ + j] =
+                interleave_buffer_.data()[2 * j] << 4 |
+                interleave_buffer_.data()[2 * j + 1];
+        }
+
+        return bytes_to_encode;
+      });
   info.encoded_timestamp = first_timestamp_in_buffer_;
   info.payload_type = payload_type_;
   return info;
 }
 
-void AudioEncoderG722::Reset() {
-  num_10ms_frames_buffered_ = 0;
-  for (size_t i = 0; i < num_channels_; ++i)
-    RTC_CHECK_EQ(0, WebRtcG722_EncoderInit(encoders_[i].encoder));
-}
-
 AudioEncoderG722::EncoderState::EncoderState() {
   RTC_CHECK_EQ(0, WebRtcG722_CreateEncoder(&encoder));
 }
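
The substantive change above is that EncodeInternal() now writes into an rtc::Buffer* through AppendData(max_bytes, writer) instead of into a caller-provided uint8_t* checked against MaxEncodedBytes(): AppendData reserves the requested space at the end of the buffer, hands the writer a view of it, and returns the byte count the writer reports, which the patch stores in EncodedInfo::encoded_bytes. Below is a toy sketch of that shape, assuming only what the patch itself shows; ToyBuffer and its pointer/size writer signature are invented for illustration, while the real overload passes the callback an rtc::ArrayView<uint8_t>.

// Toy stand-in for the AppendData(max_bytes, writer) pattern the patch
// switches to: reserve space at the end, let the writer fill it in place,
// keep and return the number of bytes the writer says it produced.
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

class ToyBuffer {
 public:
  template <typename Writer>
  size_t AppendData(size_t max_bytes, Writer&& writer) {
    const size_t old_size = data_.size();
    data_.resize(old_size + max_bytes);
    // The writer must return how much of the reserved space it actually used.
    const size_t written = writer(data_.data() + old_size, max_bytes);
    data_.resize(old_size + written);
    return written;
  }

  const uint8_t* data() const { return data_.data(); }
  size_t size() const { return data_.size(); }

 private:
  std::vector<uint8_t> data_;
};

int main() {
  ToyBuffer encoded;
  // Mirrors the patch: reserve the payload size up front, write in place, and
  // use the returned count the way EncodedInfo::encoded_bytes is used.
  const size_t encoded_bytes =
      encoded.AppendData(8, [](uint8_t* dst, size_t /*max_bytes*/) -> size_t {
        const uint8_t payload[] = {0x1a, 0x2b, 0x3c, 0x4d};
        std::memcpy(dst, payload, sizeof(payload));
        return sizeof(payload);  // Only 4 of the 8 reserved bytes were needed.
      });
  return (encoded_bytes == 4 && encoded.size() == 4) ? 0 : 1;
}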
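
The interleaving logic itself is only relocated into the writer, not changed: G.722 encodes two samples per byte, most significant half first, and the multi-channel payload carries one nibble per channel per sample instant. The following is a minimal stereo-only sketch of that byte layout, with an invented InterleaveG722Stereo helper standing in for the in-place loop over interleave_buffer_.

// Stereo-only sketch of the nibble interleaving done inside the AppendData
// writer above. InterleaveG722Stereo is an invented name for illustration.
#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

std::vector<uint8_t> InterleaveG722Stereo(const std::vector<uint8_t>& left,
                                          const std::vector<uint8_t>& right) {
  // Both inputs are assumed to be per-channel G.722 streams of equal length;
  // byte i of each holds samples 2i (high nibble) and 2i+1 (low nibble).
  std::vector<uint8_t> out(left.size() + right.size());
  for (size_t i = 0; i < left.size(); ++i) {
    const uint8_t l = left[i];
    const uint8_t r = right[i];
    // Output byte 2i: sample 2i of the left channel, then of the right.
    out[2 * i] = static_cast<uint8_t>((l & 0xf0) | (r >> 4));
    // Output byte 2i+1: sample 2i+1 of the left channel, then of the right.
    out[2 * i + 1] = static_cast<uint8_t>((l << 4) | (r & 0x0f));
  }
  return out;
}

int main() {
  // Two fabricated per-channel bytes each, chosen so the nibble shuffle is
  // easy to follow in the printed result.
  const std::vector<uint8_t> left = {0x12, 0x34};
  const std::vector<uint8_t> right = {0xab, 0xcd};
  for (uint8_t b : InterleaveG722Stereo(left, right))
    std::printf("%02x ", b);  // 1a 2b 3c 4d
  std::printf("\n");
  return 0;
}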