| OLD | NEW |
| 1 /* | 1 /* |
| 2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2013 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 443 matching lines...) | |
| 454 audio_frame->vad_activity_ = previous_audio_activity_; | 454 audio_frame->vad_activity_ = previous_audio_activity_; |
| 455 SetAudioFrameActivityAndType(vad_enabled_, type, audio_frame); | 455 SetAudioFrameActivityAndType(vad_enabled_, type, audio_frame); |
| 456 previous_audio_activity_ = audio_frame->vad_activity_; | 456 previous_audio_activity_ = audio_frame->vad_activity_; |
| 457 call_stats_.DecodedByNetEq(audio_frame->speech_type_); | 457 call_stats_.DecodedByNetEq(audio_frame->speech_type_); |
| 458 | 458 |
| 459 // Computes the RTP timestamp of the first sample in |audio_frame| from | 459 // Computes the RTP timestamp of the first sample in |audio_frame| from |
| 460 // |GetPlayoutTimestamp|, which is the timestamp of the last sample of | 460 // |GetPlayoutTimestamp|, which is the timestamp of the last sample of |
| 461 // |audio_frame|. | 461 // |audio_frame|. |
| 462 uint32_t playout_timestamp = 0; | 462 uint32_t playout_timestamp = 0; |
| 463 if (GetPlayoutTimestamp(&playout_timestamp)) { | 463 if (GetPlayoutTimestamp(&playout_timestamp)) { |
| 464 audio_frame->timestamp_ = | 464 audio_frame->timestamp_ = playout_timestamp - |
| 465 playout_timestamp - audio_frame->samples_per_channel_; | 465 static_cast<uint32_t>(audio_frame->samples_per_channel_); |
| 466 } else { | 466 } else { |
| 467 // Remains 0 until we have a valid |playout_timestamp|. | 467 // Remains 0 until we have a valid |playout_timestamp|. |
| 468 audio_frame->timestamp_ = 0; | 468 audio_frame->timestamp_ = 0; |
| 469 } | 469 } |
| 470 | 470 |
| 471 return 0; | 471 return 0; |
| 472 } | 472 } |
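
Note on the change above: the NEW side adds a `static_cast<uint32_t>` around `samples_per_channel_` (a `size_t`), presumably to make the narrowing explicit and keep the subtraction in RTP's unsigned 32-bit timestamp domain, where wraparound modulo 2^32 is the intended behavior. The standalone sketch below is not WebRTC code; `FirstSampleTimestamp` and its sample values are hypothetical, chosen only to show how the unsigned arithmetic behaves near a wrap.

```cpp
// Minimal sketch of the first-sample timestamp computation in GetAudio().
// RTP timestamps wrap modulo 2^32, so the subtraction must wrap as well.
#include <cinttypes>
#include <cstdint>
#include <cstdio>

// Hypothetical helper mirroring the expression in the diff above.
uint32_t FirstSampleTimestamp(uint32_t playout_timestamp,
                              size_t samples_per_channel) {
  // The cast keeps the subtraction in uint32_t; a playout timestamp that has
  // just wrapped past zero still yields the correct (large) first timestamp.
  return playout_timestamp - static_cast<uint32_t>(samples_per_channel);
}

int main() {
  // 10 ms of 16 kHz audio = 160 samples per channel.
  printf("%" PRIu32 "\n", FirstSampleTimestamp(1000u, 160));  // 840
  printf("%" PRIu32 "\n", FirstSampleTimestamp(40u, 160));    // wraps: 4294967176
  return 0;
}
```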
| 473 | 473 |
| 474 int32_t AcmReceiver::AddCodec(int acm_codec_id, | 474 int32_t AcmReceiver::AddCodec(int acm_codec_id, |
| 475 uint8_t payload_type, | 475 uint8_t payload_type, |
| (...skipping 360 matching lines...) | |
| 836 | 836 |
| 837 void AcmReceiver::GetDecodingCallStatistics( | 837 void AcmReceiver::GetDecodingCallStatistics( |
| 838 AudioDecodingCallStats* stats) const { | 838 AudioDecodingCallStats* stats) const { |
| 839 CriticalSectionScoped lock(crit_sect_.get()); | 839 CriticalSectionScoped lock(crit_sect_.get()); |
| 840 *stats = call_stats_.GetDecodingStatistics(); | 840 *stats = call_stats_.GetDecodingStatistics(); |
| 841 } | 841 } |
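
`GetDecodingCallStatistics()` follows the usual copy-out-under-lock pattern: it takes a scoped lock and copies the statistics struct into the caller's buffer, so readers never observe a partially updated value while the decode path is mutating it. The sketch below is an assumption-laden analogue, not WebRTC's implementation: `StatsOwner`, its fields, and the use of `std::mutex`/`std::lock_guard` stand in for `AcmReceiver`, `call_stats_`, and `CriticalSectionScoped`.

```cpp
// Sketch of a lock-guarded statistics getter, analogous to
// AcmReceiver::GetDecodingCallStatistics().
#include <mutex>

struct AudioDecodingCallStats {
  int calls_to_neteq = 0;
  int decoded_normal = 0;
  int decoded_cng = 0;
};

class StatsOwner {
 public:
  // Called from the decode path; guarded by the same mutex as the getter.
  void RecordNormalDecode() {
    std::lock_guard<std::mutex> lock(mutex_);
    ++stats_.calls_to_neteq;
    ++stats_.decoded_normal;
  }

  // Copy the stats out under the lock so callers get a consistent snapshot.
  void GetDecodingCallStatistics(AudioDecodingCallStats* stats) const {
    std::lock_guard<std::mutex> lock(mutex_);
    *stats = stats_;
  }

 private:
  mutable std::mutex mutex_;
  AudioDecodingCallStats stats_;
};
```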
| 842 | 842 |
| 843 } // namespace acm2 | 843 } // namespace acm2 |
| 844 | 844 |
| 845 } // namespace webrtc | 845 } // namespace webrtc |