The audio device is likewise abstracted into an audio device object:
class AudioDevice {
public:
    AudioDevice(const std::string& id, const std::string& name)
        : device_id(id), device_name(name) {
    }
    virtual ~AudioDevice() {
    }

public:
    std::string device_id;
    std::string device_name;
};
The device collection:
class AudioDeviceCollection
{
public:
    AudioDeviceCollection();
    virtual ~AudioDeviceCollection();

private:
    void addAudioDevice(std::shared_ptr<AudioDevice> device);
    std::vector<std::shared_ptr<AudioDevice>> audio_devices_;

public:
    int getCount();
    std::shared_ptr<AudioDevice> getDevice(int index);
    int getDevice(int index, std::string &deviceName, std::string &deviceId);
    int searchDevice(const std::string &deviceId);

    friend class XXX::AudioDeviceManager;
};
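The collection's methods are only declared above; a plausible implementation over the backing vector could look like the following (my sketch, not the original source):
void AudioDeviceCollection::addAudioDevice(std::shared_ptr<AudioDevice> device)
{
    audio_devices_.push_back(device);
}

int AudioDeviceCollection::getCount()
{
    return static_cast<int>(audio_devices_.size());
}

std::shared_ptr<AudioDevice> AudioDeviceCollection::getDevice(int index)
{
    if (index < 0 || index >= getCount()) {
        return nullptr;  // out of range
    }
    return audio_devices_[index];
}

int AudioDeviceCollection::getDevice(int index, std::string &deviceName, std::string &deviceId)
{
    if (index < 0 || index >= getCount()) {
        return -1;
    }
    deviceName = audio_devices_[index]->device_name;
    deviceId = audio_devices_[index]->device_id;
    return 0;
}

int AudioDeviceCollection::searchDevice(const std::string &deviceId)
{
    for (size_t i = 0; i < audio_devices_.size(); i++) {
        if (audio_devices_[i]->device_id == deviceId) {
            return static_cast<int>(i);  // index of the matching device
        }
    }
    return -1;  // not found
}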
Device enumeration relies on WebRTC's internal audio device module (ADM), which may only be used from a handful of fixed WebRTC threads; calling it from anywhere else trips its internal thread checker. So we first create the ADM object, and we also need a single WebRTC thread-management object that owns the threads all such calls are marshaled onto:
AudioDeviceManager::AudioDeviceManager()
    : /*audio_sink_(nullptr), */recording_devices_enumerated_(false), playout_devices_enumerated_(false)
{
    audio_recording_collection_ = std::make_shared<XXX_PUBLIC::AudioDeviceCollection>();
    audio_playout_collection_ = std::make_shared<XXX_PUBLIC::AudioDeviceCollection>();
    // The ADM must be created and initialized on the WebRTC worker thread.
    XXXThreadManager::getInstance()->worker_thread_->Invoke<void>(RTC_FROM_HERE, [&]() {
        adm_ = webrtc::AudioDeviceModule::Create(webrtc::AudioDeviceModule::kPlatformDefaultAudio);
        adm_->Init();
    });
    EnableBuiltInAEC(false);
}
void AudioDeviceManager::init()
{
    XXXThreadManager::getInstance()->worker_thread_->Invoke<void>(RTC_FROM_HERE, [&]() {
        if (adm_) {
            adm_->Init();
        }
    });
}
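The XXXThreadManager used above is not shown in this article; the following is a minimal sketch of what it is assumed to look like, with a single worker rtc::Thread that every ADM call is marshaled onto (the names mirror the usage above, everything else is illustrative):
#include <memory>
#include "rtc_base/thread.h"

class XXXThreadManager {
public:
    static XXXThreadManager* getInstance() {
        // Leaked singleton: created on first use, lives for the process.
        static XXXThreadManager* instance = new XXXThreadManager();
        return instance;
    }

    // The one thread the ADM is created on and invoked from.
    std::unique_ptr<rtc::Thread> worker_thread_;

private:
    XXXThreadManager() : worker_thread_(rtc::Thread::Create()) {
        worker_thread_->SetName("xxx_worker_thread", nullptr);
        worker_thread_->Start();
    }
};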
Enumerating audio capture devices:
std::shared_ptr<XXX_PUBLIC::AudioDeviceCollection> AudioDeviceManager::enumerateRecordingDevices()
{
    // Enumerate once and cache the result.
    if (recording_devices_enumerated_) {
        return audio_recording_collection_;
    }
    XXXThreadManager::getInstance()->worker_thread_->Invoke<void>(RTC_FROM_HERE, [&]() {
        int num_devices = adm_->RecordingDevices();
        for (int i = 0; i < num_devices; i++) {
            char name[webrtc::kAdmMaxDeviceNameSize];
            char id[webrtc::kAdmMaxGuidSize];
            if (adm_->RecordingDeviceName(i, name, id) != -1) {
                std::shared_ptr<XXX_PUBLIC::AudioDevice> device = std::make_shared<XXX_PUBLIC::AudioDevice>(id, name);
                audio_recording_collection_->addAudioDevice(device);
            }
        }
    });
    recording_devices_enumerated_ = true;
    return audio_recording_collection_;
}
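On the caller side, listing the available microphones could look like this hypothetical snippet (it assumes the two-out-parameter getDevice() returns 0 on success, as in the collection sketch earlier):
auto mics = AudioDeviceManager::getInstance()->enumerateRecordingDevices();
for (int i = 0; i < mics->getCount(); i++) {
    std::string name, id;
    if (mics->getDevice(i, name, id) == 0) {
        printf("recording device %d: %s [%s]\n", i, name.c_str(), id.c_str());
    }
}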
Enumerating audio playout devices:
std::shared_ptr<XXX_PUBLIC::AudioDeviceCollection> AudioDeviceManager::enumeratePlayoutDevices()
{
    // Enumerate once and cache the result.
    if (playout_devices_enumerated_) {
        return audio_playout_collection_;
    }
    XXXThreadManager::getInstance()->worker_thread_->Invoke<void>(RTC_FROM_HERE, [&]() {
        int num_devices = adm_->PlayoutDevices();
        for (int i = 0; i < num_devices; i++) {
            char name[webrtc::kAdmMaxDeviceNameSize];
            char id[webrtc::kAdmMaxGuidSize];
            if (adm_->PlayoutDeviceName(i, name, id) != -1) {
                std::shared_ptr<XXX_PUBLIC::AudioDevice> device = std::make_shared<XXX_PUBLIC::AudioDevice>(id, name);
                audio_playout_collection_->addAudioDevice(device);
            }
        }
    });
    playout_devices_enumerated_ = true;
    return audio_playout_collection_;
}
Registering the audio data callback:
int AudioDeviceManager::RegisterAudioSink(webrtc::AudioTransport *audio_sink)
{
    return XXXThreadManager::getInstance()->worker_thread_->Invoke<int>(RTC_FROM_HERE, [=]() {
        return adm_->RegisterAudioCallback(audio_sink);
    });
}
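The declared setters all follow the same pattern: resolve the argument, then Invoke onto the worker thread. As a hedged sketch (adm_->SetRecordingDevice(uint16_t) is the actual ADM call; the resolution logic around it is my assumption), setRecordingDevice might look like:
int AudioDeviceManager::setRecordingDevice(const std::string& deviceId)
{
    enumerateRecordingDevices();  // make sure the collection is populated
    int index = audio_recording_collection_->searchDevice(deviceId);
    if (index < 0) {
        return -1;  // unknown device id
    }
    return XXXThreadManager::getInstance()->worker_thread_->Invoke<int>(RTC_FROM_HERE, [=]() {
        if (adm_->SetRecordingDevice(static_cast<uint16_t>(index)) != 0) {
            return -1;
        }
        curr_recording_device_ = audio_recording_collection_->getDevice(index);
        return 0;
    });
}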
The overall interface of the manager object (of course we still wrap it in one more external layer to hide the WebRTC headers):
class AudioDeviceManager : public webrtc::AudioTransport
{
public:
    static AudioDeviceManager *getInstance();
    virtual ~AudioDeviceManager();

private:
    AudioDeviceManager();

public:
    std::shared_ptr<XXX_PUBLIC::AudioDeviceCollection> enumerateRecordingDevices();
    std::shared_ptr<XXX_PUBLIC::AudioDeviceCollection> enumeratePlayoutDevices();
    int setRecordingDevice(const std::string& deviceId);
    int setRecordingDevice(std::shared_ptr<XXX_PUBLIC::AudioDevice> audio_device);
    int setRecordingDeviceVolume(uint32_t volume);
    int getRecordingDeviceVolume();
    int setRecordingDeviceMute(bool mute);
    int getRecordingDeviceMute(bool* mute);
    int EnableBuiltInAEC(bool enable);
    int RegisterAudioSink(webrtc::AudioTransport *audio_sink);
    int initRecording();
    int startRecording();
    int setPlayoutDevice(const std::string& deviceId);
    int setPlayoutDevice(std::shared_ptr<XXX_PUBLIC::AudioDevice> audio_device);
    int setPlayoutDeviceVolume(uint32_t volume);
    int setPlayoutDeviceMute(bool mute);
    int getPlayoutDeviceMute(bool* mute);
    int getPlayoutDeviceVolume();
    int startPlayout();
    void init();

public:
    rtc::scoped_refptr<webrtc::AudioDeviceModule> adm_;
    rtc::scoped_refptr<webrtc::AudioDeviceModule> getADM();

private:
    static AudioDeviceManager *instance_;
    std::shared_ptr<XXX_PUBLIC::AudioDeviceCollection> audio_recording_collection_;
    std::shared_ptr<XXX_PUBLIC::AudioDeviceCollection> audio_playout_collection_;
    std::shared_ptr<XXX_PUBLIC::AudioDevice> curr_recording_device_;
    std::shared_ptr<XXX_PUBLIC::AudioDevice> curr_playout_device_;
    bool playout_devices_enumerated_;
    bool recording_devices_enumerated_;

private:
    // webrtc::AudioTransport overrides; this manager only handles devices,
    // so the capture/render callbacks are stubbed out.
    int32_t RecordedDataIsAvailable(const void* audioSamples,
                                    const size_t nSamples,
                                    const size_t nBytesPerSample,
                                    const size_t nChannels,
                                    const uint32_t samplesPerSec,
                                    const uint32_t totalDelayMS,
                                    const int32_t clockDrift,
                                    const uint32_t currentMicLevel,
                                    const bool keyPressed,
                                    uint32_t& newMicLevel) override {
        return 0;
    }
    // Implementation has to set up safe values for all specified out parameters.
    int32_t NeedMorePlayData(const size_t nSamples,
                             const size_t nBytesPerSample,
                             const size_t nChannels,
                             const uint32_t samplesPerSec,
                             void* audioSamples,
                             size_t& nSamplesOut,  // NOLINT
                             int64_t* elapsed_time_ms,
                             int64_t* ntp_time_ms) override {
        return 0;
    }
    // Method to pull mixed render audio data from all active VoE channels.
    // The data will not be passed as reference for audio processing internally.
    void PullRenderData(int bits_per_sample,
                        int sample_rate,
                        size_t number_of_channels,
                        size_t number_of_frames,
                        void* audio_data,
                        int64_t* elapsed_time_ms,
                        int64_t* ntp_time_ms) override {}
};
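To tie the pieces together, a hypothetical caller-side capture flow could read as follows (the manager registers itself as the sink since it implements webrtc::AudioTransport, so captured data is routed into the currently stubbed RecordedDataIsAvailable(); error handling omitted):
AudioDeviceManager* mgr = AudioDeviceManager::getInstance();
auto mics = mgr->enumerateRecordingDevices();
if (mics->getCount() > 0) {
    mgr->setRecordingDevice(mics->getDevice(0));  // pick the first microphone
    mgr->RegisterAudioSink(mgr);                  // receive capture callbacks
    mgr->initRecording();
    mgr->startRecording();
}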