Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 /* | 1 /* |
| 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
| 3 * | 3 * |
| 4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
| 5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
| 6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
| 7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
| 8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
| 9 */ | 9 */ |
| 10 | 10 |
| (...skipping 417 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 428 if (input_num_frames_ != proc_num_frames_) { | 428 if (input_num_frames_ != proc_num_frames_) { |
| 429 for (int i = 0; i < num_proc_channels_; ++i) { | 429 for (int i = 0; i < num_proc_channels_; ++i) { |
| 430 input_resamplers_[i]->Resample(input_buffer_->fbuf_const()->channels()[i], | 430 input_resamplers_[i]->Resample(input_buffer_->fbuf_const()->channels()[i], |
| 431 input_num_frames_, | 431 input_num_frames_, |
| 432 data_->fbuf()->channels()[i], | 432 data_->fbuf()->channels()[i], |
| 433 proc_num_frames_); | 433 proc_num_frames_); |
| 434 } | 434 } |
| 435 } | 435 } |
| 436 } | 436 } |
| 437 | 437 |
| 438 void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const { | 438 void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) { |
| 439 assert(proc_num_frames_ == output_num_frames_); | |
| 440 assert(num_channels_ == num_input_channels_); | |
| 441 assert(frame->num_channels_ == num_channels_); | |
| 442 assert(frame->samples_per_channel_ == proc_num_frames_); | |
| 443 frame->vad_activity_ = activity_; | 439 frame->vad_activity_ = activity_; |
| 444 | |
| 445 if (!data_changed) { | 440 if (!data_changed) { |
| 446 return; | 441 return; |
| 447 } | 442 } |
| 448 | 443 |
| 449 Interleave(data_->ibuf()->channels(), | 444 assert(frame->num_channels_ == num_channels_ || num_channels_ == 1); |
| 450 proc_num_frames_, | 445 assert(frame->samples_per_channel_ == output_num_frames_); |
| 451 num_channels_, | 446 |
| 452 frame->data_); | 447 // Resample if necessary. |
| 448 IFChannelBuffer* data_ptr; | |
| 449 if (proc_num_frames_ != output_num_frames_) { | |
| 450 if (!output_buffer_) { | |
| 451 output_buffer_.reset( | |
| 452 new IFChannelBuffer(output_num_frames_, num_channels_)); | |
| 453 } | |
| 454 for (int i = 0; i < num_channels_; ++i) { | |
| 455 output_resamplers_[i]->Resample(data_->fbuf()->channels()[i], | |
| 456 proc_num_frames_, | |
| 457 output_buffer_->fbuf()->channels()[i], | |
| 458 output_num_frames_); | |
| 459 } | |
| 460 data_ptr = output_buffer_.get(); | |
| 461 } else { | |
| 462 data_ptr = data_.get(); | |
|
Andrew MacDonald
2015/07/29 03:52:27
Initialize data_ptr to data_.get() when you declare it.
ekm
2015/07/29 23:35:06
Done.
| |
| 463 } | |
| 464 | |
| 465 if (frame->num_channels_ == num_channels_) { | |
| 466 Interleave(data_ptr->ibuf()->channels(), proc_num_frames_, num_channels_, | |
| 467 frame->data_); | |
| 468 } else { | |
| 469 UpmixMonoToInterleaved(data_ptr->ibuf()->channels()[0], proc_num_frames_, | |
|
aluebs-webrtc
2015/07/29 22:17:10
Should this method be in an anonymous namespace in this file?
ekm
2015/07/29 23:35:06
Hmm... it seems cleaner and clearer as is for now.
aluebs-webrtc
2015/07/30 15:28:07
As I said, I don't have a strong opinion and I agree.
ekm
2015/07/30 21:23:50
Acknowledged.
| |
| 470 frame->num_channels_, frame->data_); | |
| 471 } | |
| 453 } | 472 } |
| 454 | 473 |
| 455 void AudioBuffer::CopyLowPassToReference() { | 474 void AudioBuffer::CopyLowPassToReference() { |
| 456 reference_copied_ = true; | 475 reference_copied_ = true; |
| 457 if (!low_pass_reference_channels_.get() || | 476 if (!low_pass_reference_channels_.get() || |
| 458 low_pass_reference_channels_->num_channels() != num_channels_) { | 477 low_pass_reference_channels_->num_channels() != num_channels_) { |
| 459 low_pass_reference_channels_.reset( | 478 low_pass_reference_channels_.reset( |
| 460 new ChannelBuffer<int16_t>(num_split_frames_, | 479 new ChannelBuffer<int16_t>(num_split_frames_, |
| 461 num_proc_channels_)); | 480 num_proc_channels_)); |
| 462 } | 481 } |
| 463 for (int i = 0; i < num_proc_channels_; i++) { | 482 for (int i = 0; i < num_proc_channels_; i++) { |
| 464 memcpy(low_pass_reference_channels_->channels()[i], | 483 memcpy(low_pass_reference_channels_->channels()[i], |
| 465 split_bands_const(i)[kBand0To8kHz], | 484 split_bands_const(i)[kBand0To8kHz], |
| 466 low_pass_reference_channels_->num_frames_per_band() * | 485 low_pass_reference_channels_->num_frames_per_band() * |
| 467 sizeof(split_bands_const(i)[kBand0To8kHz][0])); | 486 sizeof(split_bands_const(i)[kBand0To8kHz][0])); |
| 468 } | 487 } |
| 469 } | 488 } |
| 470 | 489 |
| 471 void AudioBuffer::SplitIntoFrequencyBands() { | 490 void AudioBuffer::SplitIntoFrequencyBands() { |
| 472 splitting_filter_->Analysis(data_.get(), split_data_.get()); | 491 splitting_filter_->Analysis(data_.get(), split_data_.get()); |
| 473 } | 492 } |
| 474 | 493 |
| 475 void AudioBuffer::MergeFrequencyBands() { | 494 void AudioBuffer::MergeFrequencyBands() { |
| 476 splitting_filter_->Synthesis(split_data_.get(), data_.get()); | 495 splitting_filter_->Synthesis(split_data_.get(), data_.get()); |
| 477 } | 496 } |
| 478 | 497 |
| 479 } // namespace webrtc | 498 } // namespace webrtc |
| OLD | NEW |