Chromium Code Reviews

Side by Side Diff: webrtc/modules/audio_conference_mixer/source/audio_conference_mixer_impl.cc

Issue 1703833002: Remove ignored return code from modules. (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: rebase (created 4 years, 10 months ago)
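The diff below applies the issue's change to AudioConferenceMixerImpl::Process(): the method used to return an int32_t status that callers ignored, and now returns void, while failures are still reported through WEBRTC_TRACE and assert. As a rough, self-contained sketch of that pattern (ExampleMixerModule and MixOneBlock are made up for illustration and are not part of the WebRTC code base or its Module interface):

// Hypothetical stand-in illustrating the return-type change in this patch.
#include <cassert>
#include <cstdio>

class ExampleMixerModule {
 public:
  // Before the patch this would have been declared as
  //   int32_t Process();
  // returning 0 on success and -1 on failure, with every caller ignoring
  // the value. With the return code removed, failures are surfaced the way
  // the mixer already surfaced them: a trace line plus an assert.
  void Process() {
    if (!MixOneBlock()) {
      std::fprintf(stderr, "Process() failed\n");  // stand-in for WEBRTC_TRACE
      assert(false);
      return;  // was: return -1;
    }
    // was: return 0;
  }

 private:
  bool MixOneBlock() { return true; }  // placeholder for the real mixing work
};

int main() {
  ExampleMixerModule module;
  module.Process();
  return 0;
}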
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */

(...skipping 171 matching lines...)
     if(_timeScheduler.TimeToNextUpdate(timeUntilNextProcess) != 0) {
         WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                      "failed in TimeToNextUpdate() call");
         // Sanity check
         assert(false);
         return -1;
     }
     return timeUntilNextProcess;
 }

-int32_t AudioConferenceMixerImpl::Process() {
+void AudioConferenceMixerImpl::Process() {
     size_t remainingParticipantsAllowedToMix =
         kMaximumAmountOfMixedParticipants;
     {
         CriticalSectionScoped cs(_crit.get());
         assert(_processCalls == 0);
         _processCalls++;

         // Let the scheduler know that we are running one iteration.
         _timeScheduler.UpdateScheduler();
     }
(...skipping 12 matching lines...)
         // TODO(henrike): this is probably more appropriate to do in
         // GetLowestMixingFrequency().
         if (lowFreq == 12000) {
             lowFreq = 16000;
         } else if (lowFreq == 24000) {
             lowFreq = 32000;
         }
         if(lowFreq <= 0) {
             CriticalSectionScoped cs(_crit.get());
             _processCalls--;
-            return 0;
+            return;
         } else {
             switch(lowFreq) {
                 case 8000:
                     if(OutputFrequency() != kNbInHz) {
                         SetOutputFrequency(kNbInHz);
                     }
                     break;
                 case 16000:
                     if(OutputFrequency() != kWbInHz) {
                         SetOutputFrequency(kWbInHz);
                     }
                     break;
                 case 32000:
                     if(OutputFrequency() != kSwbInHz) {
                         SetOutputFrequency(kSwbInHz);
                     }
                     break;
                 case 48000:
                     if(OutputFrequency() != kFbInHz) {
                         SetOutputFrequency(kFbInHz);
                     }
                     break;
                 default:
                     assert(false);

                     CriticalSectionScoped cs(_crit.get());
                     _processCalls--;
-                    return -1;
+                    return;
             }
         }

         UpdateToMix(&mixList, &rampOutList, &mixedParticipantsMap,
                     &remainingParticipantsAllowedToMix);

         GetAdditionalAudio(&additionalFramesList);
         UpdateMixedStatus(mixedParticipantsMap);
     }

     // Get an AudioFrame for mixing from the memory pool.
     AudioFrame* mixedAudio = NULL;
     if(_audioFramePool->PopMemory(mixedAudio) == -1) {
         WEBRTC_TRACE(kTraceMemory, kTraceAudioMixerServer, _id,
                      "failed PopMemory() call");
         assert(false);
-        return -1;
+        return;
     }

-    int retval = 0;
     {
         CriticalSectionScoped cs(_crit.get());

         // TODO(henrike): it might be better to decide the number of channels
         // with an API instead of dynamically.

         // Find the max channels over all mixing lists.
         const size_t num_mixed_channels = std::max(MaxNumChannels(&mixList),
             std::max(MaxNumChannels(&additionalFramesList),
                      MaxNumChannels(&rampOutList)));
(...skipping 13 matching lines...)
         MixFromList(mixedAudio, mixList);
         MixAnonomouslyFromList(mixedAudio, additionalFramesList);
         MixAnonomouslyFromList(mixedAudio, rampOutList);

         if(mixedAudio->samples_per_channel_ == 0) {
             // Nothing was mixed, set the audio samples to silence.
             mixedAudio->samples_per_channel_ = _sampleSize;
             mixedAudio->Mute();
         } else {
             // Only call the limiter if we have something to mix.
-            if(!LimitMixedAudio(mixedAudio))
-                retval = -1;
+            LimitMixedAudio(mixedAudio);
         }
     }

     {
         CriticalSectionScoped cs(_cbCrit.get());
         if(_mixReceiver != NULL) {
             const AudioFrame** dummy = NULL;
             _mixReceiver->NewMixedAudio(
                 _id,
                 *mixedAudio,
                 dummy,
                 0);
         }
     }

     // Reclaim all outstanding memory.
     _audioFramePool->PushMemory(mixedAudio);
     ClearAudioFrameList(&mixList);
     ClearAudioFrameList(&rampOutList);
     ClearAudioFrameList(&additionalFramesList);
     {
         CriticalSectionScoped cs(_crit.get());
         _processCalls--;
     }
-    return retval;
+    return;
 }

 int32_t AudioConferenceMixerImpl::RegisterMixedStreamCallback(
     AudioMixerOutputReceiver* mixReceiver) {
     CriticalSectionScoped cs(_cbCrit.get());
     if(_mixReceiver != NULL) {
         return -1;
     }
     _mixReceiver = mixReceiver;
     return 0;
(...skipping 562 matching lines...)

     if(error != _limiter->kNoError) {
         WEBRTC_TRACE(kTraceError, kTraceAudioMixerServer, _id,
                      "Error from AudioProcessing: %d", error);
         assert(false);
         return false;
     }
     return true;
 }
 }  // namespace webrtc
