OLD | NEW |
---|---|
1 /* | 1 /* |
2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. | 2 * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved. |
3 * | 3 * |
4 * Use of this source code is governed by a BSD-style license | 4 * Use of this source code is governed by a BSD-style license |
5 * that can be found in the LICENSE file in the root of the source | 5 * that can be found in the LICENSE file in the root of the source |
6 * tree. An additional intellectual property rights grant can be found | 6 * tree. An additional intellectual property rights grant can be found |
7 * in the file PATENTS. All contributing project authors may | 7 * in the file PATENTS. All contributing project authors may |
8 * be found in the AUTHORS file in the root of the source tree. | 8 * be found in the AUTHORS file in the root of the source tree. |
9 */ | 9 */ |
10 | 10 |
(...skipping 418 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
429 for (int i = 0; i < num_proc_channels_; ++i) { | 429 for (int i = 0; i < num_proc_channels_; ++i) { |
430 input_resamplers_[i]->Resample(input_buffer_->fbuf_const()->channels()[i], | 430 input_resamplers_[i]->Resample(input_buffer_->fbuf_const()->channels()[i], |
431 input_num_frames_, | 431 input_num_frames_, |
432 data_->fbuf()->channels()[i], | 432 data_->fbuf()->channels()[i], |
433 proc_num_frames_); | 433 proc_num_frames_); |
434 } | 434 } |
435 } | 435 } |
436 } | 436 } |
437 | 437 |
438 void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const { | 438 void AudioBuffer::InterleaveTo(AudioFrame* frame, bool data_changed) const { |
439 assert(proc_num_frames_ == output_num_frames_); | |
440 assert(num_channels_ == num_input_channels_); | |
441 assert(frame->num_channels_ == num_channels_); | |
442 assert(frame->samples_per_channel_ == proc_num_frames_); | |
443 frame->vad_activity_ = activity_; | 439 frame->vad_activity_ = activity_; |
444 | |
445 if (!data_changed) { | 440 if (!data_changed) { |
446 return; | 441 return; |
447 } | 442 } |
448 | 443 |
449 Interleave(data_->ibuf()->channels(), | 444 assert(proc_num_frames_ == output_num_frames_); |
450 proc_num_frames_, | 445 assert(frame->num_channels_ == num_channels_ || num_channels_ == 1); |
451 num_channels_, | 446 assert(frame->samples_per_channel_ == proc_num_frames_); |
452 frame->data_); | 447 |
448 if (frame->num_channels_ == num_channels_) { | |
449 Interleave(data_->ibuf()->channels(), proc_num_frames_, num_channels_, | |
450 frame->data_); | |
451 } else { | |
452 // Copy single AudioBuffer channel into all AudioFrame channels | |
Andrew MacDonald
2015/07/24 23:50:39
Period at the end, and can you note that this is s
ekm
2015/07/29 00:37:19
Done.
| |
453 rtc::scoped_ptr<int16_t*> channel_ptr_copies( | |
454 new int16_t*[frame->num_channels_]); | |
Andrew MacDonald
2015/07/24 23:50:39
Arg, why did you switch to using dynamic allocation?
ekm
2015/07/29 00:37:19
Done.
| |
455 for (int i = 0; i < frame->num_channels_; ++i) { | |
456 channel_ptr_copies.get()[i] = data_->ibuf()->channels()[0]; | |
457 } | |
458 Interleave(channel_ptr_copies.get(), proc_num_frames_, num_channels_, | |
459 frame->data_); | |
460 } | |
453 } | 461 } |
454 | 462 |
455 void AudioBuffer::CopyLowPassToReference() { | 463 void AudioBuffer::CopyLowPassToReference() { |
456 reference_copied_ = true; | 464 reference_copied_ = true; |
457 if (!low_pass_reference_channels_.get() || | 465 if (!low_pass_reference_channels_.get() || |
458 low_pass_reference_channels_->num_channels() != num_channels_) { | 466 low_pass_reference_channels_->num_channels() != num_channels_) { |
459 low_pass_reference_channels_.reset( | 467 low_pass_reference_channels_.reset( |
460 new ChannelBuffer<int16_t>(num_split_frames_, | 468 new ChannelBuffer<int16_t>(num_split_frames_, |
461 num_proc_channels_)); | 469 num_proc_channels_)); |
462 } | 470 } |
463 for (int i = 0; i < num_proc_channels_; i++) { | 471 for (int i = 0; i < num_proc_channels_; i++) { |
464 memcpy(low_pass_reference_channels_->channels()[i], | 472 memcpy(low_pass_reference_channels_->channels()[i], |
465 split_bands_const(i)[kBand0To8kHz], | 473 split_bands_const(i)[kBand0To8kHz], |
466 low_pass_reference_channels_->num_frames_per_band() * | 474 low_pass_reference_channels_->num_frames_per_band() * |
467 sizeof(split_bands_const(i)[kBand0To8kHz][0])); | 475 sizeof(split_bands_const(i)[kBand0To8kHz][0])); |
468 } | 476 } |
469 } | 477 } |
470 | 478 |
471 void AudioBuffer::SplitIntoFrequencyBands() { | 479 void AudioBuffer::SplitIntoFrequencyBands() { |
472 splitting_filter_->Analysis(data_.get(), split_data_.get()); | 480 splitting_filter_->Analysis(data_.get(), split_data_.get()); |
473 } | 481 } |
474 | 482 |
475 void AudioBuffer::MergeFrequencyBands() { | 483 void AudioBuffer::MergeFrequencyBands() { |
476 splitting_filter_->Synthesis(split_data_.get(), data_.get()); | 484 splitting_filter_->Synthesis(split_data_.get(), data_.get()); |
477 } | 485 } |
478 | 486 |
479 } // namespace webrtc | 487 } // namespace webrtc |
OLD | NEW |