OLD | NEW |
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" | 11 #include "webrtc/modules/audio_mixer/audio_mixer_impl.h" |
12 | 12 |
13 #include <algorithm> | 13 #include <algorithm> |
14 #include <functional> | 14 #include <functional> |
15 #include <utility> | 15 #include <utility> |
16 | 16 |
17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" | 17 #include "webrtc/modules/audio_mixer/audio_frame_manipulator.h" |
18 #include "webrtc/modules/utility/include/audio_frame_operations.h" | 18 #include "webrtc/modules/utility/include/audio_frame_operations.h" |
19 #include "webrtc/system_wrappers/include/trace.h" | 19 #include "webrtc/system_wrappers/include/trace.h" |
20 | 20 |
21 namespace webrtc { | 21 namespace webrtc { |
22 namespace { | 22 namespace { |
23 | 23 |
24 class SourceFrame { | 24 class SourceFrame { |
25 public: | 25 public: |
| 26 // !!!: The style guide discourages abbreviations, and these don't make |
| 27 // sense anyway. Why 'p' for a MixerAudioSource? |
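| // !!!: For instance (a sketch; the names are just my suggestion, taken |
| // from the members they initialize): |
| //   SourceFrame(MixerAudioSource* audio_source, |
| //               AudioFrame* audio_frame, |
| //               bool muted, |
| //               bool was_mixed_before) |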
26 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) | 28 SourceFrame(MixerAudioSource* p, AudioFrame* a, bool m, bool was_mixed_before) |
27 : audio_source_(p), | 29 : audio_source_(p), |
28 audio_frame_(a), | 30 audio_frame_(a), |
29 muted_(m), | 31 muted_(m), |
30 was_mixed_before_(was_mixed_before) { | 32 was_mixed_before_(was_mixed_before) { |
| 33 // !!!: This would be a good place to check that the pointers aren't null. |
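| // !!!: For example (sketch, using the usual WebRTC assertion macro): |
| //   RTC_DCHECK(p); |
| //   RTC_DCHECK(a); |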
31 if (!muted_) { | 34 if (!muted_) { |
32 energy_ = NewMixerCalculateEnergy(*a); | 35 energy_ = NewMixerCalculateEnergy(*a); |
33 } | 36 } |
34 } | 37 } |
35 | 38 |
36 SourceFrame(MixerAudioSource* p, | 39 SourceFrame(MixerAudioSource* p, |
37 AudioFrame* a, | 40 AudioFrame* a, |
38 bool m, | 41 bool m, |
39 bool was_mixed_before, | 42 bool was_mixed_before, |
40 uint32_t energy) | 43 uint32_t energy) |
41 : audio_source_(p), | 44 : audio_source_(p), |
42 audio_frame_(a), | 45 audio_frame_(a), |
43 muted_(m), | 46 muted_(m), |
44 energy_(energy), | 47 energy_(energy), |
45 was_mixed_before_(was_mixed_before) {} | 48 was_mixed_before_(was_mixed_before) {} |
46 | 49 |
| 50 // !!!: Capital S: method names are CamelCase per the style guide. |
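| // !!!: i.e. bool ShouldMixBefore(const SourceFrame& other) const; |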
47 // a.shouldMixBefore(b) is used to select mixer participants. | 51 // a.shouldMixBefore(b) is used to select mixer participants. |
48 bool shouldMixBefore(const SourceFrame& other) const { | 52 bool shouldMixBefore(const SourceFrame& other) const { |
49 if (muted_ != other.muted_) { | 53 if (muted_ != other.muted_) { |
50 return other.muted_; | 54 return other.muted_; |
51 } | 55 } |
52 | 56 |
53 const auto our_activity = audio_frame_->vad_activity_; | 57 const auto our_activity = audio_frame_->vad_activity_; |
54 const auto other_activity = other.audio_frame_->vad_activity_; | 58 const auto other_activity = other.audio_frame_->vad_activity_; |
55 | 59 |
56 if (our_activity != other_activity) { | 60 if (our_activity != other_activity) { |
57 return our_activity == AudioFrame::kVadActive; | 61 return our_activity == AudioFrame::kVadActive; |
58 } | 62 } |
59 | 63 |
60 return energy_ > other.energy_; | 64 return energy_ > other.energy_; |
61 } | 65 } |
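| // !!!: For reference, a sketch of the intended use ('frames' here is a |
| // hypothetical std::vector<SourceFrame>): |
| //   std::sort(frames.begin(), frames.end(), |
| //             [](const SourceFrame& a, const SourceFrame& b) { |
| //               return a.shouldMixBefore(b); |
| //             }); |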
62 | 66 |
| 67 // !!!: You should have accessors for these, so that you can e.g. check that the pointers aren't null. |
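| // !!!: e.g. (sketch): |
| //   AudioFrame* audio_frame() const { |
| //     RTC_DCHECK(audio_frame_); |
| //     return audio_frame_; |
| //   } |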
63 MixerAudioSource* audio_source_; | 68 MixerAudioSource* audio_source_; |
64 AudioFrame* audio_frame_; | 69 AudioFrame* audio_frame_; |
65 bool muted_; | 70 bool muted_; |
66 uint32_t energy_; | 71 uint32_t energy_; |
67 bool was_mixed_before_; | 72 bool was_mixed_before_; |
68 }; | 73 }; |
69 | 74 |
| 75 // !!!: The usual terms are "downmix"/"upmix" rather than "remix". |
70 // Remixes a frame between stereo and mono. | 76 // Remixes a frame between stereo and mono. |
71 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { | 77 void RemixFrame(AudioFrame* frame, size_t number_of_channels) { |
72 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); | 78 RTC_DCHECK(number_of_channels == 1 || number_of_channels == 2); |
73 if (frame->num_channels_ == 1 && number_of_channels == 2) { | 79 if (frame->num_channels_ == 1 && number_of_channels == 2) { |
74 AudioFrameOperations::MonoToStereo(frame); | 80 AudioFrameOperations::MonoToStereo(frame); |
75 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { | 81 } else if (frame->num_channels_ == 2 && number_of_channels == 1) { |
76 AudioFrameOperations::StereoToMono(frame); | 82 AudioFrameOperations::StereoToMono(frame); |
77 } | 83 } |
78 } | 84 } |
79 | 85 |
(...skipping 10 matching lines...) |
90 NewMixerRampOut(source_frame.audio_frame_); | 96 NewMixerRampOut(source_frame.audio_frame_); |
91 } | 97 } |
92 } | 98 } |
93 } | 99 } |
94 | 100 |
95 // Mix the AudioFrames stored in audioFrameList into mixed_audio. | 101 // Mix the AudioFrames stored in audioFrameList into mixed_audio. |
96 int32_t MixFromList(AudioFrame* mixed_audio, | 102 int32_t MixFromList(AudioFrame* mixed_audio, |
97 const AudioFrameList& audio_frame_list, | 103 const AudioFrameList& audio_frame_list, |
98 int32_t id, | 104 int32_t id, |
99 bool use_limiter) { | 105 bool use_limiter) { |
| 106 // !!!: Don't use WEBRTC_TRACE in new code; it's deprecated. |
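| // !!!: Something like this instead (sketch, assuming the LOG macros from |
| // webrtc/base/logging.h): |
| //   LOG(LS_VERBOSE) << "MixFromList(mixed_audio, audio_frame_list)"; |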
100 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, | 107 WEBRTC_TRACE(kTraceStream, kTraceAudioMixerServer, id, |
101 "MixFromList(mixed_audio, audio_frame_list)"); | 108 "MixFromList(mixed_audio, audio_frame_list)"); |
| 109 // !!!: Consistently use {} for one-liners, or don't. |
102 if (audio_frame_list.empty()) | 110 if (audio_frame_list.empty()) |
103 return 0; | 111 return 0; |
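| // !!!: i.e. brace it like the block just below: |
| //   if (audio_frame_list.empty()) { |
| //     return 0; |
| //   } |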
104 | 112 |
105 if (audio_frame_list.size() == 1) { | 113 if (audio_frame_list.size() == 1) { |
106 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_; | 114 mixed_audio->timestamp_ = audio_frame_list.front()->timestamp_; |
107 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_; | 115 mixed_audio->elapsed_time_ms_ = audio_frame_list.front()->elapsed_time_ms_; |
108 } else { | 116 } else { |
109 // TODO(wu): Issue 3390. | 117 // TODO(wu): Issue 3390. |
110 // Audio frame timestamp is only supported in one channel case. | 118 // Audio frame timestamp is only supported in one channel case. |
111 mixed_audio->timestamp_ = 0; | 119 mixed_audio->timestamp_ = 0; |
(...skipping 79 matching lines...) |
191 | 199 |
192 if (sample_rate != kNbInHz && sample_rate != kWbInHz && | 200 if (sample_rate != kNbInHz && sample_rate != kWbInHz && |
193 sample_rate != kSwbInHz && sample_rate != kFbInHz) { | 201 sample_rate != kSwbInHz && sample_rate != kFbInHz) { |
194 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, | 202 WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, id_, |
195 "Invalid frequency: %d", sample_rate); | 203 "Invalid frequency: %d", sample_rate); |
196 RTC_NOTREACHED(); | 204 RTC_NOTREACHED(); |
197 return; | 205 return; |
198 } | 206 } |
199 | 207 |
200 if (OutputFrequency() != sample_rate) { | 208 if (OutputFrequency() != sample_rate) { |
| 209 // !!!: Dangerous cast! It's only safe because of the check above. |
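| // !!!: A switch over the validated rates would avoid the cast entirely |
| // (sketch, assuming kNbInHz etc. are Frequency enumerators): |
| //   switch (sample_rate) { |
| //     case kNbInHz:  SetOutputFrequency(kNbInHz);  break; |
| //     case kWbInHz:  SetOutputFrequency(kWbInHz);  break; |
| //     case kSwbInHz: SetOutputFrequency(kSwbInHz); break; |
| //     case kFbInHz:  SetOutputFrequency(kFbInHz);  break; |
| //     default: RTC_NOTREACHED(); |
| //   } |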
201 SetOutputFrequency(static_cast<Frequency>(sample_rate)); | 210 SetOutputFrequency(static_cast<Frequency>(sample_rate)); |
202 } | 211 } |
203 | 212 |
204 AudioFrameList mix_list; | 213 AudioFrameList mix_list; |
205 AudioFrameList anonymous_mix_list; | 214 AudioFrameList anonymous_mix_list; |
206 size_t num_mixed_audio_sources; | 215 size_t num_mixed_audio_sources; |
207 { | 216 { |
208 rtc::CritScope lock(&crit_); | 217 rtc::CritScope lock(&crit_); |
209 mix_list = GetNonAnonymousAudio(); | 218 mix_list = GetNonAnonymousAudio(); |
210 anonymous_mix_list = GetAnonymousAudio(); | 219 anonymous_mix_list = GetAnonymousAudio(); |
(...skipping 302 matching lines...) |
513 } | 522 } |
514 | 523 |
515 int AudioMixerImpl::GetOutputAudioLevelFullRange() { | 524 int AudioMixerImpl::GetOutputAudioLevelFullRange() { |
516 RTC_DCHECK_RUN_ON(&thread_checker_); | 525 RTC_DCHECK_RUN_ON(&thread_checker_); |
517 const int level = audio_level_.LevelFullRange(); | 526 const int level = audio_level_.LevelFullRange(); |
518 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, | 527 WEBRTC_TRACE(kTraceStateInfo, kTraceAudioMixerServer, id_, |
519 "GetAudioOutputLevelFullRange() => level=%d", level); | 528 "GetAudioOutputLevelFullRange() => level=%d", level); |
520 return level; | 529 return level; |
521 } | 530 } |
522 } // namespace webrtc | 531 } // namespace webrtc |