Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(194)

Side by Side Diff: webrtc/voice_engine/channel.cc

Issue 1311733003: Stylizing AudioConferenceMixer. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: refine two comments Created 5 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « webrtc/voice_engine/channel.h ('k') | webrtc/voice_engine/output_mixer.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 /* 1 /*
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
3 * 3 *
4 * Use of this source code is governed by a BSD-style license 4 * Use of this source code is governed by a BSD-style license
5 * that can be found in the LICENSE file in the root of the source 5 * that can be found in the LICENSE file in the root of the source
6 * tree. An additional intellectual property rights grant can be found 6 * tree. An additional intellectual property rights grant can be found
7 * in the file PATENTS. All contributing project authors may 7 * in the file PATENTS. All contributing project authors may
8 * be found in the AUTHORS file in the root of the source tree. 8 * be found in the AUTHORS file in the root of the source tree.
9 */ 9 */
10 10
(...skipping 427 matching lines...) Expand 10 before | Expand all | Expand 10 after
438 "IncomingPacket invalid RTP header"); 438 "IncomingPacket invalid RTP header");
439 return false; 439 return false;
440 } 440 }
441 header.payload_type_frequency = 441 header.payload_type_frequency =
442 rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType); 442 rtp_payload_registry_->GetPayloadTypeFrequency(header.payloadType);
443 if (header.payload_type_frequency < 0) 443 if (header.payload_type_frequency < 0)
444 return false; 444 return false;
445 return ReceivePacket(rtp_packet, rtp_packet_length, header, false); 445 return ReceivePacket(rtp_packet, rtp_packet_length, header, false);
446 } 446 }
447 447
448 int32_t Channel::GetAudioFrame(int32_t id, AudioFrame& audioFrame) 448 int32_t Channel::GetAudioFrame(int32_t id, AudioFrame* audioFrame)
449 { 449 {
450 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), 450 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
451 "Channel::GetAudioFrame(id=%d)", id); 451 "Channel::GetAudioFrame(id=%d)", id);
452 452
453 // Get 10ms raw PCM data from the ACM (mixer limits output frequency) 453 // Get 10ms raw PCM data from the ACM (mixer limits output frequency)
454 if (audio_coding_->PlayoutData10Ms(audioFrame.sample_rate_hz_, 454 if (audio_coding_->PlayoutData10Ms(audioFrame->sample_rate_hz_,
455 &audioFrame) == -1) 455 audioFrame) == -1)
456 { 456 {
457 WEBRTC_TRACE(kTraceError, kTraceVoice, 457 WEBRTC_TRACE(kTraceError, kTraceVoice,
458 VoEId(_instanceId,_channelId), 458 VoEId(_instanceId,_channelId),
459 "Channel::GetAudioFrame() PlayoutData10Ms() failed!"); 459 "Channel::GetAudioFrame() PlayoutData10Ms() failed!");
460 // In all likelihood, the audio in this frame is garbage. We return an 460 // In all likelihood, the audio in this frame is garbage. We return an
461 // error so that the audio mixer module doesn't add it to the mix. As 461 // error so that the audio mixer module doesn't add it to the mix. As
462 // a result, it won't be played out and the actions skipped here are 462 // a result, it won't be played out and the actions skipped here are
463 // irrelevant. 463 // irrelevant.
464 return -1; 464 return -1;
465 } 465 }
466 466
467 if (_RxVadDetection) 467 if (_RxVadDetection)
468 { 468 {
469 UpdateRxVadDetection(audioFrame); 469 UpdateRxVadDetection(*audioFrame);
470 } 470 }
471 471
472 // Convert module ID to internal VoE channel ID 472 // Convert module ID to internal VoE channel ID
473 audioFrame.id_ = VoEChannelId(audioFrame.id_); 473 audioFrame->id_ = VoEChannelId(audioFrame->id_);
474 // Store speech type for dead-or-alive detection 474 // Store speech type for dead-or-alive detection
475 _outputSpeechType = audioFrame.speech_type_; 475 _outputSpeechType = audioFrame->speech_type_;
476 476
477 ChannelState::State state = channel_state_.Get(); 477 ChannelState::State state = channel_state_.Get();
478 478
479 if (state.rx_apm_is_enabled) { 479 if (state.rx_apm_is_enabled) {
480 int err = rx_audioproc_->ProcessStream(&audioFrame); 480 int err = rx_audioproc_->ProcessStream(audioFrame);
481 if (err) { 481 if (err) {
482 LOG(LS_ERROR) << "ProcessStream() error: " << err; 482 LOG(LS_ERROR) << "ProcessStream() error: " << err;
483 assert(false); 483 assert(false);
484 } 484 }
485 } 485 }
486 486
487 float output_gain = 1.0f; 487 float output_gain = 1.0f;
488 float left_pan = 1.0f; 488 float left_pan = 1.0f;
489 float right_pan = 1.0f; 489 float right_pan = 1.0f;
490 { 490 {
491 CriticalSectionScoped cs(&volume_settings_critsect_); 491 CriticalSectionScoped cs(&volume_settings_critsect_);
492 output_gain = _outputGain; 492 output_gain = _outputGain;
493 left_pan = _panLeft; 493 left_pan = _panLeft;
494 right_pan= _panRight; 494 right_pan= _panRight;
495 } 495 }
496 496
497 // Output volume scaling 497 // Output volume scaling
498 if (output_gain < 0.99f || output_gain > 1.01f) 498 if (output_gain < 0.99f || output_gain > 1.01f)
499 { 499 {
500 AudioFrameOperations::ScaleWithSat(output_gain, audioFrame); 500 AudioFrameOperations::ScaleWithSat(output_gain, *audioFrame);
501 } 501 }
502 502
503 // Scale left and/or right channel(s) if stereo and master balance is 503 // Scale left and/or right channel(s) if stereo and master balance is
504 // active 504 // active
505 505
506 if (left_pan != 1.0f || right_pan != 1.0f) 506 if (left_pan != 1.0f || right_pan != 1.0f)
507 { 507 {
508 if (audioFrame.num_channels_ == 1) 508 if (audioFrame->num_channels_ == 1)
509 { 509 {
510 // Emulate stereo mode since panning is active. 510 // Emulate stereo mode since panning is active.
511 // The mono signal is copied to both left and right channels here. 511 // The mono signal is copied to both left and right channels here.
512 AudioFrameOperations::MonoToStereo(&audioFrame); 512 AudioFrameOperations::MonoToStereo(audioFrame);
513 } 513 }
514 // For true stereo mode (when we are receiving a stereo signal), no 514 // For true stereo mode (when we are receiving a stereo signal), no
515 // action is needed. 515 // action is needed.
516 516
517 // Do the panning operation (the audio frame contains stereo at this 517 // Do the panning operation (the audio frame contains stereo at this
518 // stage) 518 // stage)
519 AudioFrameOperations::Scale(left_pan, right_pan, audioFrame); 519 AudioFrameOperations::Scale(left_pan, right_pan, *audioFrame);
520 } 520 }
521 521
522 // Mix decoded PCM output with file if file mixing is enabled 522 // Mix decoded PCM output with file if file mixing is enabled
523 if (state.output_file_playing) 523 if (state.output_file_playing)
524 { 524 {
525 MixAudioWithFile(audioFrame, audioFrame.sample_rate_hz_); 525 MixAudioWithFile(*audioFrame, audioFrame->sample_rate_hz_);
526 } 526 }
527 527
528 // External media 528 // External media
529 if (_outputExternalMedia) 529 if (_outputExternalMedia)
530 { 530 {
531 CriticalSectionScoped cs(&_callbackCritSect); 531 CriticalSectionScoped cs(&_callbackCritSect);
532 const bool isStereo = (audioFrame.num_channels_ == 2); 532 const bool isStereo = (audioFrame->num_channels_ == 2);
533 if (_outputExternalMediaCallbackPtr) 533 if (_outputExternalMediaCallbackPtr)
534 { 534 {
535 _outputExternalMediaCallbackPtr->Process( 535 _outputExternalMediaCallbackPtr->Process(
536 _channelId, 536 _channelId,
537 kPlaybackPerChannel, 537 kPlaybackPerChannel,
538 (int16_t*)audioFrame.data_, 538 (int16_t*)audioFrame->data_,
539 audioFrame.samples_per_channel_, 539 audioFrame->samples_per_channel_,
540 audioFrame.sample_rate_hz_, 540 audioFrame->sample_rate_hz_,
541 isStereo); 541 isStereo);
542 } 542 }
543 } 543 }
544 544
545 // Record playout if enabled 545 // Record playout if enabled
546 { 546 {
547 CriticalSectionScoped cs(&_fileCritSect); 547 CriticalSectionScoped cs(&_fileCritSect);
548 548
549 if (_outputFileRecording && _outputFileRecorderPtr) 549 if (_outputFileRecording && _outputFileRecorderPtr)
550 { 550 {
551 _outputFileRecorderPtr->RecordAudioToFile(audioFrame); 551 _outputFileRecorderPtr->RecordAudioToFile(*audioFrame);
552 } 552 }
553 } 553 }
554 554
555 // Measure audio level (0-9) 555 // Measure audio level (0-9)
556 _outputAudioLevel.ComputeLevel(audioFrame); 556 _outputAudioLevel.ComputeLevel(*audioFrame);
557 557
558 if (capture_start_rtp_time_stamp_ < 0 && audioFrame.timestamp_ != 0) { 558 if (capture_start_rtp_time_stamp_ < 0 && audioFrame->timestamp_ != 0) {
559 // The first frame with a valid rtp timestamp. 559 // The first frame with a valid rtp timestamp.
560 capture_start_rtp_time_stamp_ = audioFrame.timestamp_; 560 capture_start_rtp_time_stamp_ = audioFrame->timestamp_;
561 } 561 }
562 562
563 if (capture_start_rtp_time_stamp_ >= 0) { 563 if (capture_start_rtp_time_stamp_ >= 0) {
564 // audioFrame.timestamp_ should be valid from now on. 564 // audioFrame.timestamp_ should be valid from now on.
565 565
566 // Compute elapsed time. 566 // Compute elapsed time.
567 int64_t unwrap_timestamp = 567 int64_t unwrap_timestamp =
568 rtp_ts_wraparound_handler_->Unwrap(audioFrame.timestamp_); 568 rtp_ts_wraparound_handler_->Unwrap(audioFrame->timestamp_);
569 audioFrame.elapsed_time_ms_ = 569 audioFrame->elapsed_time_ms_ =
570 (unwrap_timestamp - capture_start_rtp_time_stamp_) / 570 (unwrap_timestamp - capture_start_rtp_time_stamp_) /
571 (GetPlayoutFrequency() / 1000); 571 (GetPlayoutFrequency() / 1000);
572 572
573 { 573 {
574 CriticalSectionScoped lock(ts_stats_lock_.get()); 574 CriticalSectionScoped lock(ts_stats_lock_.get());
575 // Compute ntp time. 575 // Compute ntp time.
576 audioFrame.ntp_time_ms_ = ntp_estimator_.Estimate( 576 audioFrame->ntp_time_ms_ = ntp_estimator_.Estimate(
577 audioFrame.timestamp_); 577 audioFrame->timestamp_);
578 // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received. 578 // |ntp_time_ms_| won't be valid until at least 2 RTCP SRs are received.
579 if (audioFrame.ntp_time_ms_ > 0) { 579 if (audioFrame->ntp_time_ms_ > 0) {
580 // Compute |capture_start_ntp_time_ms_| so that 580 // Compute |capture_start_ntp_time_ms_| so that
581 // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_| 581 // |capture_start_ntp_time_ms_| + |elapsed_time_ms_| == |ntp_time_ms_|
582 capture_start_ntp_time_ms_ = 582 capture_start_ntp_time_ms_ =
583 audioFrame.ntp_time_ms_ - audioFrame.elapsed_time_ms_; 583 audioFrame->ntp_time_ms_ - audioFrame->elapsed_time_ms_;
584 } 584 }
585 } 585 }
586 } 586 }
587 587
588 return 0; 588 return 0;
589 } 589 }
590 590
591 int32_t 591 int32_t
592 Channel::NeededFrequency(int32_t id) 592 Channel::NeededFrequency(int32_t id) const
593 { 593 {
594 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId), 594 WEBRTC_TRACE(kTraceStream, kTraceVoice, VoEId(_instanceId,_channelId),
595 "Channel::NeededFrequency(id=%d)", id); 595 "Channel::NeededFrequency(id=%d)", id);
596 596
597 int highestNeeded = 0; 597 int highestNeeded = 0;
598 598
599 // Determine highest needed receive frequency 599 // Determine highest needed receive frequency
600 int32_t receiveFrequency = audio_coding_->ReceiveFrequency(); 600 int32_t receiveFrequency = audio_coding_->ReceiveFrequency();
601 601
602 // Return the bigger of playout and receive frequency in the ACM. 602 // Return the bigger of playout and receive frequency in the ACM.
(...skipping 3544 matching lines...) Expand 10 before | Expand all | Expand 10 after
4147 int64_t min_rtt = 0; 4147 int64_t min_rtt = 0;
4148 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt) 4148 if (_rtpRtcpModule->RTT(remoteSSRC, &rtt, &avg_rtt, &min_rtt, &max_rtt)
4149 != 0) { 4149 != 0) {
4150 return 0; 4150 return 0;
4151 } 4151 }
4152 return rtt; 4152 return rtt;
4153 } 4153 }
4154 4154
4155 } // namespace voe 4155 } // namespace voe
4156 } // namespace webrtc 4156 } // namespace webrtc
OLDNEW
« no previous file with comments | « webrtc/voice_engine/channel.h ('k') | webrtc/voice_engine/output_mixer.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698