Chromium Code Reviews

Diff: webrtc/modules/audio_processing/audio_processing_impl.cc

Issue 1413483003: Added option to specify a maximum file size when recording an AEC dump. (Closed) Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Added function to avoid breaking Chromium. Created 5 years ago
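
Reviewer note: the sketch below illustrates how a caller might use the new API once this lands; it is not part of the patch. The `apm` pointer, the file path, and the 10 MB budget are made up for illustration, and treating -1 as "no size limit" is inferred from the StartDebugRecordingForPlatformFile() call site in this change.

// Hypothetical caller-side usage of the bounded AEC dump (illustration only).
#include "webrtc/modules/audio_processing/include/audio_processing.h"

void StartBoundedAecDump(webrtc::AudioProcessing* apm) {
  // Cap the dump at roughly 10 MB; once the byte budget is spent, the
  // implementation closes the file and silently stops logging.
  const int64_t kMaxDumpBytes = 10 * 1024 * 1024;
  if (apm->StartDebugRecording("/tmp/aec_dump.pb", kMaxDumpBytes) !=
      webrtc::AudioProcessing::kNoError) {
    // Could not open the dump file; handle as needed.
    return;
  }
  // ... run ProcessStream()/ProcessReverseStream() as usual; each logged
  // message decrements the remaining byte budget ...
  apm->StopDebugRecording();
}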
 /*
  * Copyright (c) 2012 The WebRTC project authors. All Rights Reserved.
  *
  * Use of this source code is governed by a BSD-style license
  * that can be found in the LICENSE file in the root of the source
  * tree. An additional intellectual property rights grant can be found
  * in the file PATENTS. All contributing project authors may
  * be found in the AUTHORS file in the root of the source tree.
  */
 
(...skipping 609 matching lines...)
   capture_.capture_audio->CopyTo(formats_.api_format.output_stream(), dest);
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_dump_.debug_file->Open()) {
     audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
     const size_t channel_size =
         sizeof(float) * formats_.api_format.output_stream().num_frames();
     for (int i = 0; i < formats_.api_format.output_stream().num_channels(); ++i)
       msg->add_output_channel(dest[i], channel_size);
     RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                          &debug_dump_.num_bytes_left_for_log_,
                                           &crit_debug_, &debug_dump_.capture));
   }
 #endif
 
   return kNoError;
 }
 
 int AudioProcessingImpl::ProcessStream(AudioFrame* frame) {
   {
     // Acquire the capture lock in order to safely call the function
(...skipping 66 matching lines...)
   capture_.capture_audio->InterleaveTo(frame,
                                        output_copy_needed(is_data_processed()));
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_dump_.debug_file->Open()) {
     audioproc::Stream* msg = debug_dump_.capture.event_msg->mutable_stream();
     const size_t data_size =
         sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
     msg->set_output_data(frame->data_, data_size);
     RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                          &debug_dump_.num_bytes_left_for_log_,
                                           &crit_debug_, &debug_dump_.capture));
   }
 #endif
 
   return kNoError;
 }
 
 int AudioProcessingImpl::ProcessStreamLocked() {
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_dump_.debug_file->Open()) {
(...skipping 145 matching lines...)
   if (debug_dump_.debug_file->Open()) {
     debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM);
     audioproc::ReverseStream* msg =
         debug_dump_.render.event_msg->mutable_reverse_stream();
     const size_t channel_size =
         sizeof(float) * formats_.api_format.reverse_input_stream().num_frames();
     for (int i = 0;
          i < formats_.api_format.reverse_input_stream().num_channels(); ++i)
       msg->add_channel(src[i], channel_size);
     RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                          &debug_dump_.num_bytes_left_for_log_,
                                           &crit_debug_, &debug_dump_.render));
   }
 #endif
 
   render_.render_audio->CopyFrom(src,
                                  formats_.api_format.reverse_input_stream());
   return ProcessReverseStreamLocked();
 }
 
 int AudioProcessingImpl::ProcessReverseStream(AudioFrame* frame) {
(...skipping 46 matching lines...)
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   if (debug_dump_.debug_file->Open()) {
     debug_dump_.render.event_msg->set_type(audioproc::Event::REVERSE_STREAM);
     audioproc::ReverseStream* msg =
         debug_dump_.render.event_msg->mutable_reverse_stream();
     const size_t data_size =
         sizeof(int16_t) * frame->samples_per_channel_ * frame->num_channels_;
     msg->set_data(frame->data_, data_size);
     RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                          &debug_dump_.num_bytes_left_for_log_,
                                           &crit_debug_, &debug_dump_.render));
   }
 #endif
   render_.render_audio->DeinterleaveFrom(frame);
   return ProcessReverseStreamLocked();
 }
 
 int AudioProcessingImpl::ProcessReverseStreamLocked() {
   AudioBuffer* ra = render_.render_audio.get();  // For brevity.
   if (formats_.rev_proc_format.sample_rate_hz() == kSampleRate32kHz) {
(...skipping 65 matching lines...)
   rtc::CritScope cs(&crit_capture_);
   capture_.delay_offset_ms = offset;
 }
 
 int AudioProcessingImpl::delay_offset_ms() const {
   rtc::CritScope cs(&crit_capture_);
   return capture_.delay_offset_ms;
 }
 
 int AudioProcessingImpl::StartDebugRecording(
-    const char filename[AudioProcessing::kMaxFilenameSize]) {
+    const char filename[AudioProcessing::kMaxFilenameSize],
+    int64_t max_log_size_bytes) {
   // Run in a single-threaded manner.
   rtc::CritScope cs_render(&crit_render_);
   rtc::CritScope cs_capture(&crit_capture_);
   static_assert(kMaxFilenameSize == FileWrapper::kMaxFileNameSize, "");
 
   if (filename == nullptr) {
     return kNullPointerError;
   }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes;
   // Stop any ongoing recording.
   if (debug_dump_.debug_file->Open()) {
     if (debug_dump_.debug_file->CloseFile() == -1) {
       return kFileError;
     }
   }
 
   if (debug_dump_.debug_file->OpenFile(filename, false) == -1) {
     debug_dump_.debug_file->CloseFile();
     return kFileError;
   }
 
   RETURN_ON_ERR(WriteConfigMessage(true));
   RETURN_ON_ERR(WriteInitMessage());
   return kNoError;
 #else
   return kUnsupportedFunctionError;
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }
 
-int AudioProcessingImpl::StartDebugRecording(FILE* handle) {
+int AudioProcessingImpl::StartDebugRecording(FILE* handle,
+                                             int64_t max_log_size_bytes) {
   // Run in a single-threaded manner.
   rtc::CritScope cs_render(&crit_render_);
   rtc::CritScope cs_capture(&crit_capture_);
 
   if (handle == nullptr) {
     return kNullPointerError;
   }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
+  debug_dump_.num_bytes_left_for_log_ = max_log_size_bytes;
+
   // Stop any ongoing recording.
   if (debug_dump_.debug_file->Open()) {
     if (debug_dump_.debug_file->CloseFile() == -1) {
       return kFileError;
     }
   }
 
   if (debug_dump_.debug_file->OpenFromFileHandle(handle, true, false) == -1) {
     return kFileError;
   }
 
   RETURN_ON_ERR(WriteConfigMessage(true));
   RETURN_ON_ERR(WriteInitMessage());
   return kNoError;
 #else
   return kUnsupportedFunctionError;
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 }
 
 int AudioProcessingImpl::StartDebugRecordingForPlatformFile(
     rtc::PlatformFile handle) {
   // Run in a single-threaded manner.
   rtc::CritScope cs_render(&crit_render_);
   rtc::CritScope cs_capture(&crit_capture_);
   FILE* stream = rtc::FdopenPlatformFileForWriting(handle);
-  return StartDebugRecording(stream);
+  return StartDebugRecording(stream, -1);
 }
 
 int AudioProcessingImpl::StopDebugRecording() {
   // Run in a single-threaded manner.
   rtc::CritScope cs_render(&crit_render_);
   rtc::CritScope cs_capture(&crit_capture_);
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
   // We just return if recording hasn't started.
   if (debug_dump_.debug_file->Open()) {
(...skipping 260 matching lines...)
     RTC_HISTOGRAM_ENUMERATION("WebRTC.Audio.NumOfAecSystemDelayJumps",
                               capture_.aec_system_delay_jumps, 51);
   }
   capture_.aec_system_delay_jumps = -1;
   capture_.last_aec_system_delay_ms = 0;
 }
 
 #ifdef WEBRTC_AUDIOPROC_DEBUG_DUMP
 int AudioProcessingImpl::WriteMessageToDebugFile(
     FileWrapper* debug_file,
+    int64_t* filesize_limit_bytes,
     rtc::CriticalSection* crit_debug,
     ApmDebugDumpThreadState* debug_state) {
   int32_t size = debug_state->event_msg->ByteSize();
   if (size <= 0) {
     return kUnspecifiedError;
   }
 #if defined(WEBRTC_ARCH_BIG_ENDIAN)
   // TODO(ajm): Use little-endian "on the wire". For the moment, we can be
   // pretty safe in assuming little-endian.
 #endif
 
   if (!debug_state->event_msg->SerializeToString(&debug_state->event_str)) {
     return kUnspecifiedError;
   }
 
   {
     // Ensure atomic writes of the message.
-    rtc::CritScope cs_capture(crit_debug);
+    rtc::CritScope cs_debug(crit_debug);
+
+    RTC_DCHECK(debug_file->Open());
+    // Update the byte counter.
+    if (*filesize_limit_bytes >= 0) {
+      *filesize_limit_bytes -=
+          (sizeof(int32_t) + debug_state->event_str.length());
+      if (*filesize_limit_bytes < 0) {
+        // Not enough bytes are left to write this message, so stop logging.
+        debug_file->CloseFile();
+        return kNoError;
+      }
+    }
     // Write message preceded by its size.
     if (!debug_file->Write(&size, sizeof(int32_t))) {
       return kFileError;
     }
     if (!debug_file->Write(debug_state->event_str.data(),
                            debug_state->event_str.length())) {
       return kFileError;
     }
   }
 
(...skipping 14 matching lines...)
   msg->set_num_reverse_channels(
       formats_.api_format.reverse_input_stream().num_channels());
   msg->set_reverse_sample_rate(
       formats_.api_format.reverse_input_stream().sample_rate_hz());
   msg->set_output_sample_rate(
       formats_.api_format.output_stream().sample_rate_hz());
   // TODO(ekmeyerson): Add reverse output fields to
   // debug_dump_.capture.event_msg.
 
   RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                        &debug_dump_.num_bytes_left_for_log_,
                                         &crit_debug_, &debug_dump_.capture));
   return kNoError;
 }
 
 int AudioProcessingImpl::WriteConfigMessage(bool forced) {
   audioproc::Config config;
 
   config.set_aec_enabled(public_submodules_->echo_cancellation->is_enabled());
   config.set_aec_delay_agnostic_enabled(
       public_submodules_->echo_cancellation->is_delay_agnostic_enabled());
(...skipping 32 matching lines...)
       debug_dump_.capture.last_serialized_config == serialized_config) {
     return kNoError;
   }
 
   debug_dump_.capture.last_serialized_config = serialized_config;
 
   debug_dump_.capture.event_msg->set_type(audioproc::Event::CONFIG);
   debug_dump_.capture.event_msg->mutable_config()->CopyFrom(config);
 
   RETURN_ON_ERR(WriteMessageToDebugFile(debug_dump_.debug_file.get(),
+                                        &debug_dump_.num_bytes_left_for_log_,
                                         &crit_debug_, &debug_dump_.capture));
   return kNoError;
 }
 #endif  // WEBRTC_AUDIOPROC_DEBUG_DUMP
 
 }  // namespace webrtc