Index: webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index d7203b9da3ea8b5adb9f1998d285cd0320fbce4d..85caf07c4ae8fc631a3fd1a94228c355719b47b1 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -94,10 +94,7 @@ int AudioEncoderG722::GetTargetBitrate() const {
 AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
     uint32_t rtp_timestamp,
     rtc::ArrayView<const int16_t> audio,
-    size_t max_encoded_bytes,
-    uint8_t* encoded) {
-  RTC_CHECK_GE(max_encoded_bytes, MaxEncodedBytes());
-
+    rtc::Buffer* encoded) {
   if (num_10ms_frames_buffered_ == 0)
     first_timestamp_in_buffer_ = rtp_timestamp;
@@ -117,27 +114,35 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
   num_10ms_frames_buffered_ = 0;
   const size_t samples_per_channel = SamplesPerChannel();
   for (size_t i = 0; i < num_channels_; ++i) {
-    const size_t encoded = WebRtcG722_Encode(
+    const size_t bytes_encoded = WebRtcG722_Encode(

ossu (2016/02/24 13:24:33):
This used to shadow the input parameter encoded, which is why it has been renamed.

kwiberg-webrtc (2016/02/25 00:29:04):
Acknowledged.
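
A brief aside on the shadowing mentioned above: a local declared in an inner block may legally shadow a function parameter, which is what the old loop body did with the name encoded. The snippet below is a made-up, minimal illustration of that hazard; the function and values are hypothetical and not part of the WebRTC code.

#include <cstddef>
#include <cstdint>

// Hypothetical example: the loop-local reuses the parameter's name.
size_t Encode(uint8_t* encoded, size_t num_blocks) {
  size_t total = 0;
  for (size_t i = 0; i < num_blocks; ++i) {
    const size_t encoded = 1;  // legal, but shadows the uint8_t* parameter
    total += encoded;          // easy to misread; hence the rename to bytes_encoded
  }
  return total;
}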

         encoders_[i].encoder, encoders_[i].speech_buffer.get(),
         samples_per_channel, encoders_[i].encoded_buffer.data());
-    RTC_CHECK_EQ(encoded, samples_per_channel / 2);
+    RTC_CHECK_EQ(bytes_encoded, samples_per_channel / 2);
   }
-  // Interleave the encoded bytes of the different channels. Each separate
-  // channel and the interleaved stream encodes two samples per byte, most
-  // significant half first.
-  for (size_t i = 0; i < samples_per_channel / 2; ++i) {
-    for (size_t j = 0; j < num_channels_; ++j) {
-      uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
-      interleave_buffer_.data()[j] = two_samples >> 4;
-      interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
-    }
-    for (size_t j = 0; j < num_channels_; ++j)
-      encoded[i * num_channels_ + j] = interleave_buffer_.data()[2 * j] << 4 |
-                                       interleave_buffer_.data()[2 * j + 1];
-  }
+  const size_t bytes_to_encode = samples_per_channel / 2 * num_channels_;
+
+  encoded->AppendData(bytes_to_encode, [&] (rtc::ArrayView<uint8_t> encoded) {
+    // Interleave the encoded bytes of the different channels. Each separate
+    // channel and the interleaved stream encodes two samples per byte, most
+    // significant half first.
+    for (size_t i = 0; i < samples_per_channel / 2; ++i) {
+      for (size_t j = 0; j < num_channels_; ++j) {
+        uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
+        interleave_buffer_.data()[j] = two_samples >> 4;
+        interleave_buffer_.data()[num_channels_ + j] = two_samples & 0xf;
+      }
+      for (size_t j = 0; j < num_channels_; ++j)
+        encoded[i * num_channels_ + j] =
+            interleave_buffer_.data()[2 * j] << 4 |
+            interleave_buffer_.data()[2 * j + 1];
+    }
+
+    return bytes_to_encode;
+  });
+
   EncodedInfo info;
-  info.encoded_bytes = samples_per_channel / 2 * num_channels_;
+  info.encoded_bytes = bytes_to_encode;

kwiberg-webrtc (2016/02/25 00:29:04):
Have the AppendData call here and set .encoded_bytes from its return value?

ossu (2016/02/25 10:39:51):
Yeah.
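
For readers unfamiliar with the append-via-setter pattern being discussed: the new code hands AppendData a maximum size and a callable that fills the freshly appended space and reports how many bytes it actually wrote. Below is a rough, self-contained sketch of that shape using a made-up MiniBuffer class rather than the real rtc::Buffer API, under the assumption implied by the comment that the reported byte count is also what should end up in info.encoded_bytes.

#include <cstddef>
#include <cstdint>
#include <cstring>
#include <vector>

// Hypothetical stand-in for the append-via-setter idea.
class MiniBuffer {
 public:
  // Grows the buffer by up to max_bytes, lets the setter fill the new space,
  // then keeps (and returns) only the byte count the setter reports.
  template <typename Setter>
  size_t AppendData(size_t max_bytes, Setter&& setter) {
    const size_t old_size = data_.size();
    data_.resize(old_size + max_bytes);
    const size_t written = setter(data_.data() + old_size, max_bytes);
    data_.resize(old_size + written);
    return written;
  }
  size_t size() const { return data_.size(); }

 private:
  std::vector<uint8_t> data_;
};

// In the spirit of the suggestion: let AppendData's return value drive
// encoded_bytes instead of computing the payload size twice.
size_t EncodeInto(MiniBuffer* encoded, const uint8_t* payload, size_t len) {
  return encoded->AppendData(len, [&](uint8_t* dst, size_t /*max_bytes*/) {
    std::memcpy(dst, payload, len);  // stand-in for the codec writing bytes
    return len;                      // bytes actually produced
  });
}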

   info.encoded_timestamp = first_timestamp_in_buffer_;
   info.payload_type = payload_type_;
   return info;
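
To make the interleaving inside the new lambda easier to follow: each G.722 byte carries two 4-bit codewords, most significant half first, and the interleaved stream alternates channels sample by sample before repacking the halves two per byte. The following self-contained sketch reproduces only that packing rule; the function name, container types, and test values are invented for illustration and are not part of the WebRTC code.

#include <cstddef>
#include <cstdint>
#include <cstdio>
#include <vector>

// Interleave per-channel G.722 payloads: emit sample n of every channel
// before sample n+1 of any channel, two 4-bit codewords per output byte,
// most significant half first.
std::vector<uint8_t> InterleaveG722(
    const std::vector<std::vector<uint8_t>>& channels) {
  const size_t num_channels = channels.size();
  const size_t bytes_per_channel = channels.empty() ? 0 : channels[0].size();
  std::vector<uint8_t> out;
  out.reserve(bytes_per_channel * num_channels);
  for (size_t i = 0; i < bytes_per_channel; ++i) {
    // Half-samples for this byte position, in channel-interleaved order:
    // the earlier sample of each channel, then the later sample of each.
    std::vector<uint8_t> halves;
    for (size_t j = 0; j < num_channels; ++j)
      halves.push_back(static_cast<uint8_t>(channels[j][i] >> 4));   // high half
    for (size_t j = 0; j < num_channels; ++j)
      halves.push_back(static_cast<uint8_t>(channels[j][i] & 0xf));  // low half
    // Repack pairs of half-samples into bytes, most significant half first.
    for (size_t k = 0; k < halves.size(); k += 2)
      out.push_back(static_cast<uint8_t>(halves[k] << 4 | halves[k + 1]));
  }
  return out;
}

int main() {
  // Stereo example: one encoded byte per channel, i.e. two samples each.
  // Channel 0 codewords: 0xA then 0xB; channel 1 codewords: 0xC then 0xD.
  const std::vector<uint8_t> interleaved = InterleaveG722({{0xAB}, {0xCD}});
  for (uint8_t byte : interleaved)
    std::printf("%02X ", static_cast<unsigned>(byte));  // prints "AC BD "
  std::printf("\n");
  return 0;
}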