Index: webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
diff --git a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
index a0d1720e460f9c90231ca583f9d001df6e8cc826..9eb7a11524d4b13fcd1a23c82c9e0387f35fa99f 100644
--- a/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
+++ b/webrtc/modules/audio_coding/codecs/g722/audio_encoder_g722.cc
@@ -19,7 +19,7 @@ namespace webrtc {
 
 namespace {
 
-const int kSampleRateHz = 16000;
+const size_t kSampleRateHz = 16000;
 
 }  // namespace
@@ -40,13 +40,14 @@ AudioEncoderG722::EncoderState::~EncoderState() {
 AudioEncoderG722::AudioEncoderG722(const Config& config)
     : num_channels_(config.num_channels),
       payload_type_(config.payload_type),
-      num_10ms_frames_per_packet_(config.frame_size_ms / 10),
+      num_10ms_frames_per_packet_(
+          static_cast<size_t>(config.frame_size_ms / 10)),
       num_10ms_frames_buffered_(0),
       first_timestamp_in_buffer_(0),
       encoders_(new EncoderState[num_channels_]),
       interleave_buffer_(2 * num_channels_) {
   CHECK(config.IsOk());
-  const int samples_per_channel =
+  const size_t samples_per_channel =
       kSampleRateHz / 100 * num_10ms_frames_per_packet_;
   for (int i = 0; i < num_channels_; ++i) {
     encoders_[i].speech_buffer.reset(new int16_t[samples_per_channel]);
@@ -71,14 +72,14 @@ int AudioEncoderG722::NumChannels() const {
 }
 
 size_t AudioEncoderG722::MaxEncodedBytes() const {
-  return static_cast<size_t>(SamplesPerChannel() / 2 * num_channels_);
+  return SamplesPerChannel() / 2 * num_channels_;
 }
 
-int AudioEncoderG722::Num10MsFramesInNextPacket() const {
+size_t AudioEncoderG722::Num10MsFramesInNextPacket() const {
   return num_10ms_frames_per_packet_;
 }
 
-int AudioEncoderG722::Max10MsFramesInAPacket() const {
+size_t AudioEncoderG722::Max10MsFramesInAPacket() const {
   return num_10ms_frames_per_packet_;
 }
@@ -98,8 +99,8 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
     first_timestamp_in_buffer_ = rtp_timestamp;
 
   // Deinterleave samples and save them in each channel's buffer.
-  const int start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
-  for (int i = 0; i < kSampleRateHz / 100; ++i)
+  const size_t start = kSampleRateHz / 100 * num_10ms_frames_buffered_;
+  for (size_t i = 0; i < kSampleRateHz / 100; ++i)
     for (int j = 0; j < num_channels_; ++j)
       encoders_[j].speech_buffer[start + i] = audio[i * num_channels_ + j];
@@ -111,19 +112,18 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
   // Encode each channel separately.
   CHECK_EQ(num_10ms_frames_buffered_, num_10ms_frames_per_packet_);
   num_10ms_frames_buffered_ = 0;
-  const int samples_per_channel = SamplesPerChannel();
+  const size_t samples_per_channel = SamplesPerChannel();
   for (int i = 0; i < num_channels_; ++i) {
-    const int encoded = WebRtcG722_Encode(
+    const size_t encoded = WebRtcG722_Encode(
         encoders_[i].encoder, encoders_[i].speech_buffer.get(),
         samples_per_channel, encoders_[i].encoded_buffer.data<uint8_t>());
-    CHECK_GE(encoded, 0);
     CHECK_EQ(encoded, samples_per_channel / 2);
   }
 
   // Interleave the encoded bytes of the different channels. Each separate
   // channel and the interleaved stream encodes two samples per byte, most
   // significant half first.
-  for (int i = 0; i < samples_per_channel / 2; ++i) {
+  for (size_t i = 0; i < samples_per_channel / 2; ++i) {
     for (int j = 0; j < num_channels_; ++j) {
       uint8_t two_samples = encoders_[j].encoded_buffer.data()[i];
       interleave_buffer_.data()[j] = two_samples >> 4;
@@ -140,7 +140,7 @@ AudioEncoder::EncodedInfo AudioEncoderG722::EncodeInternal(
   return info;
 }
 
-int AudioEncoderG722::SamplesPerChannel() const {
+size_t AudioEncoderG722::SamplesPerChannel() const {
   return kSampleRateHz / 100 * num_10ms_frames_per_packet_;
 }