Chromium Code Reviews

Unified Diff: webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc

Issue 1206783002: Cleanup of iOS AudioDevice implementation (Closed)
Base URL: https://chromium.googlesource.com/external/webrtc.git@master
Patch Set: Rebased, created 5 years, 5 months ago
Index: webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
diff --git a/webrtc/modules/audio_device/android/audio_device_unittest.cc b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
similarity index 65%
copy from webrtc/modules/audio_device/android/audio_device_unittest.cc
copy to webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
index b21fd6e2004a74e841dd45930d034254ace295cf..b75f18fd9fd8cbcd925b449d8b077b41428fdd7b 100644
--- a/webrtc/modules/audio_device/android/audio_device_unittest.cc
+++ b/webrtc/modules/audio_device/ios/audio_device_unittest_ios.cc
@@ -19,14 +19,12 @@
#include "testing/gtest/include/gtest/gtest.h"
#include "webrtc/base/arraysize.h"
#include "webrtc/base/criticalsection.h"
+#include "webrtc/base/logging.h"
#include "webrtc/base/scoped_ptr.h"
#include "webrtc/base/scoped_ref_ptr.h"
-#include "webrtc/modules/audio_device/android/audio_common.h"
-#include "webrtc/modules/audio_device/android/audio_manager.h"
-#include "webrtc/modules/audio_device/android/build_info.h"
-#include "webrtc/modules/audio_device/android/ensure_initialized.h"
#include "webrtc/modules/audio_device/audio_device_impl.h"
#include "webrtc/modules/audio_device/include/audio_device.h"
+#include "webrtc/modules/audio_device/ios/audio_device_ios.h"
#include "webrtc/system_wrappers/interface/clock.h"
#include "webrtc/system_wrappers/interface/event_wrapper.h"
#include "webrtc/system_wrappers/interface/sleep.h"
@@ -41,7 +39,6 @@ using ::testing::Invoke;
using ::testing::NiceMock;
using ::testing::NotNull;
using ::testing::Return;
-using ::testing::TestWithParam;
// #define ENABLE_DEBUG_PRINTF
#ifdef ENABLE_DEBUG_PRINTF
@@ -58,26 +55,31 @@ namespace webrtc {
static const int kNumCallbacks = 10;
// Max amount of time we wait for an event to be set while counting callbacks.
static const int kTestTimeOutInMilliseconds = 10 * 1000;
+// Number of bits per PCM audio sample.
+static const int kBitsPerSample = 16;
+// Number of bytes per PCM audio sample.
+static const int kBytesPerSample = kBitsPerSample / 8;
// Average number of audio callbacks per second assuming 10ms packet size.
static const int kNumCallbacksPerSecond = 100;
// Play out a test file during this time (unit is in seconds).
-static const int kFilePlayTimeInSec = 5;
-static const int kBitsPerSample = 16;
-static const int kBytesPerSample = kBitsPerSample / 8;
+static const int kFilePlayTimeInSec = 15;
// Run the full-duplex test during this time (unit is in seconds).
// Note that the first |kNumIgnoreFirstCallbacks| callbacks are ignored.
-static const int kFullDuplexTimeInSec = 5;
+static const int kFullDuplexTimeInSec = 10;
// Wait for the callback sequence to stabilize by ignoring this many of the
// initial callbacks (avoids initial FIFO access).
// Only used in the RunPlayoutAndRecordingInFullDuplex test.
static const int kNumIgnoreFirstCallbacks = 50;
// Sets the number of impulses per second in the latency test.
+// TODO(henrika): fine tune this setting for iOS.
static const int kImpulseFrequencyInHz = 1;
// Length of round-trip latency measurements. Number of transmitted impulses
// is kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1.
-static const int kMeasureLatencyTimeInSec = 11;
+// TODO(henrika): fine tune this setting for iOS.
+static const int kMeasureLatencyTimeInSec = 5;
// Utilized in round-trip latency measurements to avoid capturing noise samples.
-static const int kImpulseThreshold = 1000;
+// TODO(henrika): fine tune this setting for iOS.
+static const int kImpulseThreshold = 50;
static const char kTag[] = "[..........] ";
enum TransportType {
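With the values chosen in this patch set, the latency test transmits kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1 = 1 * 5 - 1 = 4 impulses, and an impulse only counts as detected when its peak exceeds kImpulseThreshold (50). A minimal sketch of that bookkeeping, built on the constants above (the helper name is illustrative, not part of the file):

// Expected number of latency samples produced by the loopback latency test,
// derived from the constants above (4 with the values in this patch set).
constexpr int NumExpectedLatencySamples() {
  return kImpulseFrequencyInHz * kMeasureLatencyTimeInSec - 1;
}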
@@ -92,6 +94,7 @@ class AudioStreamInterface {
public:
virtual void Write(const void* source, int num_frames) = 0;
virtual void Read(void* destination, int num_frames) = 0;
+
protected:
virtual ~AudioStreamInterface() {}
};
@@ -100,11 +103,10 @@ class AudioStreamInterface {
// construction.
class FileAudioStream : public AudioStreamInterface {
public:
- FileAudioStream(
- int num_callbacks, const std::string& file_name, int sample_rate)
- : file_size_in_bytes_(0),
- sample_rate_(sample_rate),
- file_pos_(0) {
+ FileAudioStream(int num_callbacks,
+ const std::string& file_name,
+ int sample_rate)
+ : file_size_in_bytes_(0), sample_rate_(sample_rate), file_pos_(0) {
file_size_in_bytes_ = test::GetFileSize(file_name);
sample_rate_ = sample_rate;
EXPECT_GE(file_size_in_callbacks(), num_callbacks)
@@ -114,8 +116,8 @@ class FileAudioStream : public AudioStreamInterface {
file_.reset(new int16_t[num_16bit_samples]);
FILE* audio_file = fopen(file_name.c_str(), "rb");
EXPECT_NE(audio_file, nullptr);
- int num_samples_read = fread(
- file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
+ int num_samples_read =
+ fread(file_.get(), sizeof(int16_t), num_16bit_samples, audio_file);
EXPECT_EQ(num_samples_read, num_16bit_samples);
fclose(audio_file);
}
@@ -126,8 +128,7 @@ class FileAudioStream : public AudioStreamInterface {
// Read samples from file stored in memory (at construction) and copy
// |num_frames| (<=> 10ms) to the |destination| byte buffer.
void Read(void* destination, int num_frames) override {
- memcpy(destination,
- static_cast<int16_t*> (&file_[file_pos_]),
+ memcpy(destination, static_cast<int16_t*>(&file_[file_pos_]),
num_frames * sizeof(int16_t));
file_pos_ += num_frames;
}
@@ -166,9 +167,7 @@ class FifoAudioStream : public AudioStreamInterface {
EXPECT_NE(fifo_.get(), nullptr);
}
- ~FifoAudioStream() {
- Flush();
- }
+ ~FifoAudioStream() { Flush(); }
// Allocate new memory, copy |num_frames| samples from |source| into memory
// and add pointer to the memory location to end of the list.
@@ -180,9 +179,7 @@ class FifoAudioStream : public AudioStreamInterface {
return;
}
int16_t* memory = new int16_t[frames_per_buffer_];
- memcpy(static_cast<int16_t*> (&memory[0]),
- source,
- bytes_per_buffer_);
+ memcpy(static_cast<int16_t*>(&memory[0]), source, bytes_per_buffer_);
rtc::CritScope lock(&lock_);
fifo_->push_back(memory);
const int size = fifo_->size();
@@ -205,24 +202,21 @@ class FifoAudioStream : public AudioStreamInterface {
} else {
int16_t* memory = fifo_->front();
fifo_->pop_front();
- memcpy(destination,
- static_cast<int16_t*> (&memory[0]),
- bytes_per_buffer_);
+ memcpy(destination, static_cast<int16_t*>(&memory[0]), bytes_per_buffer_);
delete memory;
}
}
- int size() const {
- return fifo_->size();
- }
+ int size() const { return fifo_->size(); }
- int largest_size() const {
- return largest_size_;
- }
+ int largest_size() const { return largest_size_; }
int average_size() const {
- return (total_written_elements_ == 0) ? 0.0 : 0.5 + static_cast<float> (
- total_written_elements_) / (write_count_ - kNumIgnoreFirstCallbacks);
+ return (total_written_elements_ == 0)
+ ? 0.0
+ : 0.5 +
+ static_cast<float>(total_written_elements_) /
+ (write_count_ - kNumIgnoreFirstCallbacks);
}
private:
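A side note on the reformatted average_size() above: the 0.5-plus-float-division pattern is simply round-to-nearest on a non-negative ratio, with the first kNumIgnoreFirstCallbacks writes excluded. The same computation as a standalone sketch (illustrative only, not part of the file):

// Mean FIFO depth rounded to the nearest integer, ignoring the first
// kNumIgnoreFirstCallbacks writes; mirrors FifoAudioStream::average_size().
int RoundedAverageFifoSize(int total_written_elements, int write_count) {
  if (total_written_elements == 0)
    return 0;
  return static_cast<int>(0.5 + static_cast<float>(total_written_elements) /
                                    (write_count - kNumIgnoreFirstCallbacks));
}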
@@ -255,8 +249,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
bytes_per_buffer_(frames_per_buffer_ * sizeof(int16_t)),
play_count_(0),
rec_count_(0),
- pulse_time_(0) {
- }
+ pulse_time_(0) {}
// Insert periodic impulses in first two samples of |destination|.
void Read(void* destination, int num_frames) override {
@@ -272,7 +265,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
}
PRINT(".");
const int16_t impulse = std::numeric_limits<int16_t>::max();
- int16_t* ptr16 = static_cast<int16_t*> (destination);
+ int16_t* ptr16 = static_cast<int16_t*>(destination);
for (int i = 0; i < 2; ++i) {
*ptr16++ = impulse;
}
@@ -289,19 +282,18 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
// been transmitted (sets |pulse_time_| to value larger than zero).
return;
}
- const int16_t* ptr16 = static_cast<const int16_t*> (source);
+ const int16_t* ptr16 = static_cast<const int16_t*>(source);
std::vector<int16_t> vec(ptr16, ptr16 + num_frames);
// Find max value in the audio buffer.
int max = *std::max_element(vec.begin(), vec.end());
// Find index (element position in vector) of the max element.
- int index_of_max = std::distance(vec.begin(),
- std::find(vec.begin(), vec.end(),
- max));
+ int index_of_max =
+ std::distance(vec.begin(), std::find(vec.begin(), vec.end(), max));
if (max > kImpulseThreshold) {
PRINTD("(%d,%d)", max, index_of_max);
int64_t now_time = clock_->TimeInMilliseconds();
- int extra_delay = IndexToMilliseconds(static_cast<double> (index_of_max));
- PRINTD("[%d]", static_cast<int> (now_time - pulse_time_));
+ int extra_delay = IndexToMilliseconds(static_cast<double>(index_of_max));
+ PRINTD("[%d]", static_cast<int>(now_time - pulse_time_));
PRINTD("[%d]", extra_delay);
// Total latency is the difference between transmit time and detection
// time plus the extra delay within the buffer in which we detected the
@@ -315,9 +307,7 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
}
}
- int num_latency_values() const {
- return latencies_.size();
- }
+ int num_latency_values() const { return latencies_.size(); }
int min_latency() const {
if (latencies_.empty())
@@ -334,9 +324,10 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
int average_latency() const {
if (latencies_.empty())
return 0;
- return 0.5 + static_cast<double> (
- std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
- latencies_.size();
+ return 0.5 +
+ static_cast<double>(
+ std::accumulate(latencies_.begin(), latencies_.end(), 0)) /
+ latencies_.size();
}
void PrintResults() const {
@@ -345,8 +336,8 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
PRINT("%d ", *it);
}
PRINT("\n");
- PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag,
- min_latency(), max_latency(), average_latency());
+ PRINT("%s[min, max, avg]=[%d, %d, %d] ms\n", kTag, min_latency(),
+ max_latency(), average_latency());
}
int IndexToMilliseconds(double index) const {
@@ -362,7 +353,6 @@ class LatencyMeasuringAudioStream : public AudioStreamInterface {
int64_t pulse_time_;
std::vector<int> latencies_;
};
-
// Mocks the AudioTransport object and proxies actions for the two callbacks
// (RecordedDataIsAvailable and NeedMorePlayData) to different implementations
// of AudioStreamInterface.
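Condensing how LatencyMeasuringAudioStream above turns one detected impulse into a latency sample: the sample is the elapsed time between writing the impulse and detecting it, plus the impulse's offset inside the 10 ms capture buffer converted to milliseconds. A sketch with illustrative names, assuming IndexToMilliseconds() maps a sample index linearly to milliseconds within the 10 ms buffer (its body falls outside this hunk):

// One latency measurement, mirroring the logic in Write() above.
int RoundTripLatencyMs(int64_t pulse_time_ms, int64_t detection_time_ms,
                       int index_of_max, int frames_per_buffer) {
  // Assumed linear mapping of buffer index to milliseconds (10 ms buffer).
  const int extra_delay_ms =
      static_cast<int>(0.5 + 10.0 * index_of_max / frames_per_buffer);
  return static_cast<int>(detection_time_ms - pulse_time_ms) + extra_delay_ms;
}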
@@ -489,42 +479,37 @@ class MockAudioTransport : public AudioTransport {
int play_count_;
int rec_count_;
AudioStreamInterface* audio_stream_;
- rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream_;
};
// AudioDeviceTest test fixture.
class AudioDeviceTest : public ::testing::Test {
protected:
- AudioDeviceTest()
- : test_is_done_(EventWrapper::Create()) {
- // One-time initialization of JVM and application context. Ensures that we
- // can do calls between C++ and Java. Initializes both Java and OpenSL ES
- // implementations.
- webrtc::audiodevicemodule::EnsureInitialized();
+ AudioDeviceTest() : test_is_done_(EventWrapper::Create()) {
+ old_sev_ = rtc::LogMessage::GetLogToDebug();
+ // Set suitable logging level here. Change to rtc::LS_INFO for more verbose
+ // output. See webrtc/base/logging.h for complete list of options.
+ rtc::LogMessage::LogToDebug(rtc::LS_INFO);
+ // Add extra logging fields here (timestamps and thread id).
+ // rtc::LogMessage::LogTimestamps();
+ rtc::LogMessage::LogThreads();
// Creates an audio device using a default audio layer.
audio_device_ = CreateAudioDevice(AudioDeviceModule::kPlatformDefaultAudio);
EXPECT_NE(audio_device_.get(), nullptr);
EXPECT_EQ(0, audio_device_->Init());
- playout_parameters_ = audio_manager()->GetPlayoutAudioParameters();
- record_parameters_ = audio_manager()->GetRecordAudioParameters();
- build_info_.reset(new BuildInfo());
+ EXPECT_EQ(0,
+ audio_device()->GetPlayoutAudioParameters(&playout_parameters_));
+ EXPECT_EQ(0, audio_device()->GetRecordAudioParameters(&record_parameters_));
}
virtual ~AudioDeviceTest() {
EXPECT_EQ(0, audio_device_->Terminate());
+ rtc::LogMessage::LogToDebug(old_sev_);
}
- int playout_sample_rate() const {
- return playout_parameters_.sample_rate();
- }
- int record_sample_rate() const {
- return record_parameters_.sample_rate();
- }
- int playout_channels() const {
- return playout_parameters_.channels();
- }
- int record_channels() const {
- return record_parameters_.channels();
- }
+ // TODO(henrika): don't use hardcoded values below.
+ int playout_sample_rate() const { return playout_parameters_.sample_rate(); }
+ int record_sample_rate() const { return record_parameters_.sample_rate(); }
+ int playout_channels() const { return playout_parameters_.channels(); }
+ int record_channels() const { return record_parameters_.channels(); }
int playout_frames_per_10ms_buffer() const {
return playout_parameters_.frames_per_10ms_buffer();
}
@@ -533,7 +518,8 @@ class AudioDeviceTest : public ::testing::Test {
}
int total_delay_ms() const {
- return audio_manager()->GetDelayEstimateInMilliseconds();
+ // TODO(henrika): improve this part.
+ return 100;
}
rtc::scoped_refptr<AudioDeviceModule> audio_device() const {
@@ -544,15 +530,6 @@ class AudioDeviceTest : public ::testing::Test {
return static_cast<AudioDeviceModuleImpl*>(audio_device_.get());
}
- AudioManager* audio_manager() const {
- return audio_device_impl()->GetAndroidAudioManagerForTest();
- }
-
- AudioManager* GetAudioManager(AudioDeviceModule* adm) const {
- return static_cast<AudioDeviceModuleImpl*>(adm)->
- GetAndroidAudioManagerForTest();
- }
-
AudioDeviceBuffer* audio_device_buffer() const {
return audio_device_impl()->GetAudioDeviceBuffer();
}
@@ -566,77 +543,25 @@ class AudioDeviceTest : public ::testing::Test {
// Returns file name relative to the resource root given a sample rate.
std::string GetFileName(int sample_rate) {
- EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100);
+ EXPECT_TRUE(sample_rate == 48000 || sample_rate == 44100 ||
+ sample_rate == 16000);
char fname[64];
- snprintf(fname,
- sizeof(fname),
- "audio_device/audio_short%d",
+ snprintf(fname, sizeof(fname), "audio_device/audio_short%d",
sample_rate / 1000);
std::string file_name(webrtc::test::ResourcePath(fname, "pcm"));
EXPECT_TRUE(test::FileExists(file_name));
-#ifdef ENABLE_PRINTF
- PRINT("file name: %s\n", file_name.c_str());
+#ifdef ENABLE_DEBUG_PRINTF
+ PRINTD("file name: %s\n", file_name.c_str());
const int bytes = test::GetFileSize(file_name);
- PRINT("file size: %d [bytes]\n", bytes);
- PRINT("file size: %d [samples]\n", bytes / kBytesPerSample);
+ PRINTD("file size: %d [bytes]\n", bytes);
+ PRINTD("file size: %d [samples]\n", bytes / kBytesPerSample);
const int seconds = bytes / (sample_rate * kBytesPerSample);
- PRINT("file size: %d [secs]\n", seconds);
- PRINT("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
+ PRINTD("file size: %d [secs]\n", seconds);
+ PRINTD("file size: %d [callbacks]\n", seconds * kNumCallbacksPerSecond);
#endif
return file_name;
}
- AudioDeviceModule::AudioLayer GetActiveAudioLayer() const {
- AudioDeviceModule::AudioLayer audio_layer;
- EXPECT_EQ(0, audio_device()->ActiveAudioLayer(&audio_layer));
- return audio_layer;
- }
-
- int TestDelayOnAudioLayer(
- const AudioDeviceModule::AudioLayer& layer_to_test) {
- rtc::scoped_refptr<AudioDeviceModule> audio_device;
- audio_device = CreateAudioDevice(layer_to_test);
- EXPECT_NE(audio_device.get(), nullptr);
- AudioManager* audio_manager = GetAudioManager(audio_device.get());
- EXPECT_NE(audio_manager, nullptr);
- return audio_manager->GetDelayEstimateInMilliseconds();
- }
-
- AudioDeviceModule::AudioLayer TestActiveAudioLayer(
- const AudioDeviceModule::AudioLayer& layer_to_test) {
- rtc::scoped_refptr<AudioDeviceModule> audio_device;
- audio_device = CreateAudioDevice(layer_to_test);
- EXPECT_NE(audio_device.get(), nullptr);
- AudioDeviceModule::AudioLayer active;
- EXPECT_EQ(0, audio_device->ActiveAudioLayer(&active));
- return active;
- }
-
- bool DisableTestForThisDevice(const std::string& model) {
- return (build_info_->GetDeviceModel() == model);
- }
-
- // Volume control is currently only supported for the Java output audio layer.
- // For OpenSL ES, the internal stream volume is always on max level and there
- // is no need for this test to set it to max.
- bool AudioLayerSupportsVolumeControl() const {
- return GetActiveAudioLayer() == AudioDeviceModule::kAndroidJavaAudio;
- }
-
- void SetMaxPlayoutVolume() {
- if (!AudioLayerSupportsVolumeControl())
- return;
- uint32_t max_volume;
- EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
- EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
- }
-
- void DisableBuiltInAECIfAvailable() {
- if (audio_device()->BuiltInAECIsAvailable()) {
- EXPECT_EQ(0, audio_device()->EnableBuiltInAEC(false));
- }
- }
-
void StartPlayout() {
EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
EXPECT_FALSE(audio_device()->Playing());
@@ -666,158 +591,25 @@ class AudioDeviceTest : public ::testing::Test {
EXPECT_FALSE(audio_device()->Recording());
}
- int GetMaxSpeakerVolume() const {
- uint32_t max_volume(0);
- EXPECT_EQ(0, audio_device()->MaxSpeakerVolume(&max_volume));
- return max_volume;
- }
-
- int GetMinSpeakerVolume() const {
- uint32_t min_volume(0);
- EXPECT_EQ(0, audio_device()->MinSpeakerVolume(&min_volume));
- return min_volume;
- }
-
- int GetSpeakerVolume() const {
- uint32_t volume(0);
- EXPECT_EQ(0, audio_device()->SpeakerVolume(&volume));
- return volume;
- }
-
rtc::scoped_ptr<EventWrapper> test_is_done_;
rtc::scoped_refptr<AudioDeviceModule> audio_device_;
AudioParameters playout_parameters_;
AudioParameters record_parameters_;
- rtc::scoped_ptr<BuildInfo> build_info_;
+ rtc::LoggingSeverity old_sev_;
};
TEST_F(AudioDeviceTest, ConstructDestruct) {
// Using the test fixture to create and destruct the audio device module.
}
-// We always ask for a default audio layer when the ADM is constructed. But the
-// ADM will then internally set the best suitable combination of audio layers,
-// for input and output based on if low-latency output audio in combination
-// with OpenSL ES is supported or not. This test ensures that the correct
-// selection is done.
-TEST_F(AudioDeviceTest, VerifyDefaultAudioLayer) {
- const AudioDeviceModule::AudioLayer audio_layer = GetActiveAudioLayer();
- bool low_latency_output = audio_manager()->IsLowLatencyPlayoutSupported();
- AudioDeviceModule::AudioLayer expected_audio_layer = low_latency_output ?
- AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio :
- AudioDeviceModule::kAndroidJavaAudio;
- EXPECT_EQ(expected_audio_layer, audio_layer);
-}
-
-// Verify that it is possible to explicitly create the two types of supported
-// ADMs. These two tests overrides the default selection of native audio layer
-// by ignoring if the device supports low-latency output or not.
-TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForCombinedJavaOpenSLCombo) {
- AudioDeviceModule::AudioLayer expected_layer =
- AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio;
- AudioDeviceModule::AudioLayer active_layer = TestActiveAudioLayer(
- expected_layer);
- EXPECT_EQ(expected_layer, active_layer);
-}
-
-TEST_F(AudioDeviceTest, CorrectAudioLayerIsUsedForJavaInBothDirections) {
- AudioDeviceModule::AudioLayer expected_layer =
- AudioDeviceModule::kAndroidJavaAudio;
- AudioDeviceModule::AudioLayer active_layer = TestActiveAudioLayer(
- expected_layer);
- EXPECT_EQ(expected_layer, active_layer);
-}
-
-// The Android ADM supports two different delay reporting modes. One for the
-// low-latency output path (in combination with OpenSL ES), and one for the
-// high-latency output path (Java backends in both directions). These two tests
-// verifies that the audio manager reports correct delay estimate given the
-// selected audio layer. Note that, this delay estimate will only be utilized
-// if the HW AEC is disabled.
-TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForHighLatencyOutputPath) {
- EXPECT_EQ(kHighLatencyModeDelayEstimateInMilliseconds,
- TestDelayOnAudioLayer(AudioDeviceModule::kAndroidJavaAudio));
-}
-
-TEST_F(AudioDeviceTest, UsesCorrectDelayEstimateForLowLatencyOutputPath) {
- EXPECT_EQ(kLowLatencyModeDelayEstimateInMilliseconds,
- TestDelayOnAudioLayer(
- AudioDeviceModule::kAndroidJavaInputAndOpenSLESOutputAudio));
-}
-
-// Ensure that the ADM internal audio device buffer is configured to use the
-// correct set of parameters.
-TEST_F(AudioDeviceTest, VerifyAudioDeviceBufferParameters) {
- EXPECT_EQ(playout_parameters_.sample_rate(),
- audio_device_buffer()->PlayoutSampleRate());
- EXPECT_EQ(record_parameters_.sample_rate(),
- audio_device_buffer()->RecordingSampleRate());
- EXPECT_EQ(playout_parameters_.channels(),
- audio_device_buffer()->PlayoutChannels());
- EXPECT_EQ(record_parameters_.channels(),
- audio_device_buffer()->RecordingChannels());
-}
-
-
TEST_F(AudioDeviceTest, InitTerminate) {
// Initialization is part of the test fixture.
EXPECT_TRUE(audio_device()->Initialized());
+ // webrtc::SleepMs(5 * 1000);
EXPECT_EQ(0, audio_device()->Terminate());
EXPECT_FALSE(audio_device()->Initialized());
}
-TEST_F(AudioDeviceTest, Devices) {
- // Device enumeration is not supported. Verify fixed values only.
- EXPECT_EQ(1, audio_device()->PlayoutDevices());
- EXPECT_EQ(1, audio_device()->RecordingDevices());
-}
-
-TEST_F(AudioDeviceTest, SpeakerVolumeShouldBeAvailable) {
- // The OpenSL ES output audio path does not support volume control.
- if (!AudioLayerSupportsVolumeControl())
- return;
- bool available;
- EXPECT_EQ(0, audio_device()->SpeakerVolumeIsAvailable(&available));
- EXPECT_TRUE(available);
-}
-
-TEST_F(AudioDeviceTest, MaxSpeakerVolumeIsPositive) {
- // The OpenSL ES output audio path does not support volume control.
- if (!AudioLayerSupportsVolumeControl())
- return;
- StartPlayout();
- EXPECT_GT(GetMaxSpeakerVolume(), 0);
- StopPlayout();
-}
-
-TEST_F(AudioDeviceTest, MinSpeakerVolumeIsZero) {
- // The OpenSL ES output audio path does not support volume control.
- if (!AudioLayerSupportsVolumeControl())
- return;
- EXPECT_EQ(GetMinSpeakerVolume(), 0);
-}
-
-TEST_F(AudioDeviceTest, DefaultSpeakerVolumeIsWithinMinMax) {
- // The OpenSL ES output audio path does not support volume control.
- if (!AudioLayerSupportsVolumeControl())
- return;
- const int default_volume = GetSpeakerVolume();
- EXPECT_GE(default_volume, GetMinSpeakerVolume());
- EXPECT_LE(default_volume, GetMaxSpeakerVolume());
-}
-
-TEST_F(AudioDeviceTest, SetSpeakerVolumeActuallySetsVolume) {
- // The OpenSL ES output audio path does not support volume control.
- if (!AudioLayerSupportsVolumeControl())
- return;
- const int default_volume = GetSpeakerVolume();
- const int max_volume = GetMaxSpeakerVolume();
- EXPECT_EQ(0, audio_device()->SetSpeakerVolume(max_volume));
- int new_volume = GetSpeakerVolume();
- EXPECT_EQ(new_volume, max_volume);
- EXPECT_EQ(0, audio_device()->SetSpeakerVolume(default_volume));
-}
-
// Tests that playout can be initiated, started and stopped. No audio callback
// is registered in this test.
TEST_F(AudioDeviceTest, StartStopPlayout) {
@@ -827,6 +619,15 @@ TEST_F(AudioDeviceTest, StartStopPlayout) {
StopPlayout();
}
+// Tests that recording can be initiated, started and stopped. No audio callback
+// is registered in this test.
+TEST_F(AudioDeviceTest, StartStopRecording) {
+ StartRecording();
+ StopRecording();
+ StartRecording();
+ StopRecording();
+}
+
// Verify that calling StopPlayout() will leave us in an uninitialized state
// which will require a new call to InitPlayout(). This test does not call
// StartPlayout() while being uninitialized since doing so will hit a DCHECK.
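The body of the test described by the comment above falls outside this hunk; a rough sketch of the pattern it describes, using the fixture helpers and the public AudioDeviceModule API (the test name is illustrative):

// Illustrative sketch only; the real test is not shown in this diff.
TEST_F(AudioDeviceTest, StopPlayoutRequiresInitToRestartSketch) {
  StartPlayout();
  StopPlayout();
  // StopPlayout() leaves the playout side uninitialized; InitPlayout() must
  // succeed again before playout can be restarted.
  EXPECT_FALSE(audio_device()->PlayoutIsInitialized());
  EXPECT_EQ(0, audio_device()->InitPlayout());
  EXPECT_TRUE(audio_device()->PlayoutIsInitialized());
}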
@@ -843,11 +644,8 @@ TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
MockAudioTransport mock(kPlayout);
mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
- kBytesPerSample,
- playout_channels(),
- playout_sample_rate(),
- NotNull(),
- _, _, _))
+ kBytesPerSample, playout_channels(),
+ playout_sample_rate(), NotNull(), _, _, _))
.Times(AtLeast(kNumCallbacks));
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartPlayout();
@@ -860,17 +658,12 @@ TEST_F(AudioDeviceTest, StartPlayoutVerifyCallbacks) {
TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
MockAudioTransport mock(kRecording);
mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
- EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
- record_frames_per_10ms_buffer(),
- kBytesPerSample,
- record_channels(),
- record_sample_rate(),
- total_delay_ms(),
- 0,
- 0,
- false,
- _))
- .Times(AtLeast(kNumCallbacks));
+ EXPECT_CALL(mock,
+ RecordedDataIsAvailable(
+ NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
+ record_channels(), record_sample_rate(),
+ _, // TODO(henrika): fix delay
+ 0, 0, false, _)).Times(AtLeast(kNumCallbacks));
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartRecording();
@@ -878,30 +671,21 @@ TEST_F(AudioDeviceTest, StartRecordingVerifyCallbacks) {
StopRecording();
}
-
// Start playout and recording (full-duplex audio) and verify that audio is
// active in both directions.
TEST_F(AudioDeviceTest, StartPlayoutAndRecordingVerifyCallbacks) {
MockAudioTransport mock(kPlayout | kRecording);
- mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
+ mock.HandleCallbacks(test_is_done_.get(), nullptr, kNumCallbacks);
EXPECT_CALL(mock, NeedMorePlayData(playout_frames_per_10ms_buffer(),
- kBytesPerSample,
- playout_channels(),
- playout_sample_rate(),
- NotNull(),
- _, _, _))
- .Times(AtLeast(kNumCallbacks));
- EXPECT_CALL(mock, RecordedDataIsAvailable(NotNull(),
- record_frames_per_10ms_buffer(),
- kBytesPerSample,
- record_channels(),
- record_sample_rate(),
- total_delay_ms(),
- 0,
- 0,
- false,
- _))
+ kBytesPerSample, playout_channels(),
+ playout_sample_rate(), NotNull(), _, _, _))
.Times(AtLeast(kNumCallbacks));
+ EXPECT_CALL(mock,
+ RecordedDataIsAvailable(
+ NotNull(), record_frames_per_10ms_buffer(), kBytesPerSample,
+ record_channels(), record_sample_rate(),
+ _, // TODO(henrika): fix delay
+ 0, 0, false, _)).Times(AtLeast(kNumCallbacks));
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartPlayout();
StartRecording();
@@ -921,8 +705,7 @@ TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
std::string file_name = GetFileName(playout_sample_rate());
rtc::scoped_ptr<FileAudioStream> file_audio_stream(
new FileAudioStream(num_callbacks, file_name, playout_sample_rate()));
- mock.HandleCallbacks(test_is_done_.get(),
- file_audio_stream.get(),
+ mock.HandleCallbacks(test_is_done_.get(), file_audio_stream.get(),
num_callbacks);
// SetMaxPlayoutVolume();
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
@@ -931,6 +714,12 @@ TEST_F(AudioDeviceTest, RunPlayoutWithFileAsSource) {
StopPlayout();
}
+TEST_F(AudioDeviceTest, Devices) {
+ // Device enumeration is not supported. Verify fixed values only.
+ EXPECT_EQ(1, audio_device()->PlayoutDevices());
+ EXPECT_EQ(1, audio_device()->RecordingDevices());
+}
+
// Start playout and recording and store recorded data in an intermediate FIFO
// buffer from which the playout side then reads its samples in the same order
// as they were stored. Under ideal circumstances, a callback sequence would
@@ -950,15 +739,14 @@ TEST_F(AudioDeviceTest, RunPlayoutAndRecordingInFullDuplex) {
NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
rtc::scoped_ptr<FifoAudioStream> fifo_audio_stream(
new FifoAudioStream(playout_frames_per_10ms_buffer()));
- mock.HandleCallbacks(test_is_done_.get(),
- fifo_audio_stream.get(),
+ mock.HandleCallbacks(test_is_done_.get(), fifo_audio_stream.get(),
kFullDuplexTimeInSec * kNumCallbacksPerSecond);
- SetMaxPlayoutVolume();
+ // SetMaxPlayoutVolume();
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
StartRecording();
StartPlayout();
- test_is_done_->Wait(std::max(kTestTimeOutInMilliseconds,
- 1000 * kFullDuplexTimeInSec));
+ test_is_done_->Wait(
+ std::max(kTestTimeOutInMilliseconds, 1000 * kFullDuplexTimeInSec));
StopPlayout();
StopRecording();
EXPECT_LE(fifo_audio_stream->average_size(), 10);
@@ -980,16 +768,15 @@ TEST_F(AudioDeviceTest, DISABLED_MeasureLoopbackLatency) {
NiceMock<MockAudioTransport> mock(kPlayout | kRecording);
rtc::scoped_ptr<LatencyMeasuringAudioStream> latency_audio_stream(
new LatencyMeasuringAudioStream(playout_frames_per_10ms_buffer()));
- mock.HandleCallbacks(test_is_done_.get(),
- latency_audio_stream.get(),
+ mock.HandleCallbacks(test_is_done_.get(), latency_audio_stream.get(),
kMeasureLatencyTimeInSec * kNumCallbacksPerSecond);
EXPECT_EQ(0, audio_device()->RegisterAudioCallback(&mock));
- SetMaxPlayoutVolume();
- DisableBuiltInAECIfAvailable();
+ // SetMaxPlayoutVolume();
+ // DisableBuiltInAECIfAvailable();
StartRecording();
StartPlayout();
- test_is_done_->Wait(std::max(kTestTimeOutInMilliseconds,
- 1000 * kMeasureLatencyTimeInSec));
+ test_is_done_->Wait(
+ std::max(kTestTimeOutInMilliseconds, 1000 * kMeasureLatencyTimeInSec));
StopPlayout();
StopRecording();
// Verify that the correct number of transmitted impulses are detected.