概念
实现AudioUnit录制声音和播放音频
主要功能:
- 实现低延迟的音频I/O
- 多路录音的合成并且回放
- 回声消除,Mix两轨音频,以及均衡器,压缩器,混响器等
从以下步骤详解
设置AVAudioSession
该类具有以下作用
- 此类用来管理多个APP对音频资源的利用,
- 设置自己的APP是否和其他APP音频同时存在,还是中断其他APP声音
- 在手机调到静音模式下,自己的APP音频是否可以播放出声音
- 电话或者其他APP中断自己APP的音频的事件处理
- 指定音频输入和输出的设备(比如是听筒输出声音,还是扬声器输出声音)
- 是否支持录音,录音同时是否支持音频播放
// Configures the shared AVAudioSession for simultaneous recording and
// playback, routing output to the loudspeaker by default.
// Fix vs. original: both setCategory: and setActive: returned errors that
// were silently ignored (setActive: was even passed error:nil). Check the
// BOOL return value — not the error pointer — and log failures.
-(void)setupSession
{
    NSError *error = nil;
    AVAudioSession *session = [AVAudioSession sharedInstance];
    // PlayAndRecord: input and output concurrently; DefaultToSpeaker: route
    // to the loudspeaker instead of the receiver (earpiece).
    if (![session setCategory:AVAudioSessionCategoryPlayAndRecord
                  withOptions:AVAudioSessionCategoryOptionDefaultToSpeaker
                        error:&error]) {
        NSLog(@"AVAudioSession setCategory failed: %@", error);
    }
    if (![session setActive:YES error:&error]) {
        NSLog(@"AVAudioSession setActive failed: %@", error);
    }
}
二、设置AudioComponentDescription
音频单元描述
// Describes an audio component (quoted from AudioComponent.h).
struct AudioComponentDescription {
OSType componentType; // unique four-char code identifying the component's generic type
OSType componentSubType;// specific subtype; valid values depend on componentType
OSType componentManufacturer;// vendor identifier, e.g. kAudioUnitManufacturer_Apple
UInt32 componentFlags;// no values are currently defined; must be set to 0
UInt32 componentFlagsMask;// no values are currently defined; must be set to 0
}
// Example: describe a Remote I/O unit (microphone in, speaker out).
// Fix vs. original: the variable was declared as `inputDesc` but every
// assignment used an undeclared `desc`, so the snippet could not compile.
AudioComponentDescription inputDesc;
inputDesc.componentType = kAudioUnitType_Output; // we want to output
inputDesc.componentSubType = kAudioUnitSubType_RemoteIO; // we want input and output
inputDesc.componentManufacturer = kAudioUnitManufacturer_Apple; // select provider
inputDesc.componentFlags = 0; // must be zero
inputDesc.componentFlagsMask = 0; // must be zero
用来描述unit的类型,包括均衡器,3D混音,多路混音,远端输入输出
componentType
:类型是相对应的,什么样的功能设置什么样的类型,componentSubType
是根据componentType
设置的
componentSubType
:根据componentType设置对应的子类型,例如kAudioUnitSubType_RemoteIO或kAudioUnitSubType_VoiceProcessingIO
componentManufacturer
:厂商,一般设置kAudioUnitManufacturer_Apple
componentFlags
:没有明确指定的值,设置0
componentFlagsMask
:没有明确指定的值,设置0
- AudioComponentFindNext
AudioComponent outputComponent = AudioComponentFindNext(NULL, &inputDesc);
通过AudioComponentInstanceNew创建
AudioComponentInstanceNew(outputComponent, &audioUnit);
通过AUGraph创建
// Builds an AUGraph containing a single I/O node and stores both the graph
// and the resolved AudioUnit into augStruct.
// Call order matters and must not change: NewAUGraph -> AUGraphAddNode ->
// AUGraphOpen -> AUGraphNodeInfo (the unit reference only exists after Open).
// NOTE(review): the subtype here is VoiceProcessingIO (Remote I/O plus echo
// cancellation), even though the node variable is named remoteIONode.
-(void)createAUGraph:(MyAUGraphStruct*)augStruct{
//Create graph
CheckError(NewAUGraph(&augStruct->graph),
"NewAUGraph failed");
//Describe the node to add: an Apple voice-processing I/O output unit
AudioComponentDescription inputcd = {0};
inputcd.componentType = kAudioUnitType_Output;
inputcd.componentSubType = kAudioUnitSubType_VoiceProcessingIO;
inputcd.componentManufacturer = kAudioUnitManufacturer_Apple;
AUNode remoteIONode;
//Add node to the graph
CheckError(AUGraphAddNode(augStruct->graph,
&inputcd,
&remoteIONode),
"AUGraphAddNode failed");
//Open the graph (instantiates the audio units behind the nodes)
CheckError(AUGraphOpen(augStruct->graph),
"AUGraphOpen failed");
//Get reference to the node's AudioUnit so it can be configured directly
CheckError(AUGraphNodeInfo(augStruct->graph,
remoteIONode,
&inputcd,
&augStruct->remoteIOUnit),
"AUGraphNodeInfo failed");
}
设置输出格式描述
// Describes the format of a linear audio stream (quoted from CoreAudioTypes.h).
struct AudioStreamBasicDescription
{
Float64 mSampleRate; // sample rate: frames of uncompressed audio per second
AudioFormatID mFormatID;// audio data format, e.g. kAudioFormatLinearPCM
AudioFormatFlags mFormatFlags;// format-specific flags detailing the sample layout
UInt32 mBytesPerPacket;// bytes in each packet of data
UInt32 mFramesPerPacket;// frames in each packet; 1 for linear PCM
UInt32 mBytesPerFrame;// bytes in each frame
UInt32 mChannelsPerFrame;// channels per frame: 1 = mono, 2 = stereo
UInt32 mBitsPerChannel;// sample depth per channel, e.g. 8 or 16 bits
UInt32 mReserved;// reserved for alignment; must be 0
};
mSampleRate
:采样率,每秒未压缩的数据的样本数=每秒的帧数
mFormatID
:音频格式,必须设置的一个值,设置PCM编码
mFormatFlags
:格式标签,指定线性PCM样本的布局细节(符号、整型/浮点、交错与否等),
例如kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked
mFramesPerPacket
:每个数据包的帧数,对于未压缩的线性PCM固定填1
mChannelsPerFrame
:每一帧声道数
mBitsPerChannel
:每条声道的采样位数(采样精度),常用8或16
- mFormatID
typedef UInt32 AudioFormatID;
// Four-character codes identifying audio data formats (from CoreAudioTypes.h).
// 'lpcm' (uncompressed linear PCM) is the format required for Audio Unit I/O.
CF_ENUM(AudioFormatID)
{
kAudioFormatLinearPCM = 'lpcm', // uncompressed linear PCM
kAudioFormatAC3 = 'ac-3',
kAudioFormat60958AC3 = 'cac3',
kAudioFormatAppleIMA4 = 'ima4',
kAudioFormatMPEG4AAC = 'aac ', // note the trailing space in the code
kAudioFormatMPEG4CELP = 'celp',
kAudioFormatMPEG4HVXC = 'hvxc',
kAudioFormatMPEG4TwinVQ = 'twvq',
kAudioFormatMACE3 = 'MAC3',
kAudioFormatMACE6 = 'MAC6',
kAudioFormatULaw = 'ulaw',
kAudioFormatALaw = 'alaw',
kAudioFormatQDesign = 'QDMC',
kAudioFormatQDesign2 = 'QDM2'
}
AudioUnitSetProperty配置表
// 1. Scope values passed to AudioUnitSetProperty (from AUComponent.h).
CF_ENUM(AudioUnitScope) {
kAudioUnitScope_Global = 0,// whole-unit context, e.g. when installing render callbacks
kAudioUnitScope_Input = 1,
kAudioUnitScope_Output = 2,// e.g. when setting the stream format a bus produces
kAudioUnitScope_Group = 3,
kAudioUnitScope_Part = 4,
kAudioUnitScope_Note = 5,
kAudioUnitScope_Layer = 6,
kAudioUnitScope_LayerItem = 7
};
实例代码
// 16-bit signed-integer, packed, mono linear PCM at 44.1 kHz.
// Fix vs. original: the struct was not zero-initialized, leaving mReserved
// (and any field accidentally skipped) as stack garbage, and both
// AudioUnitSetProperty return codes were silently ignored.
AudioStreamBasicDescription audioFormat = {0};
audioFormat.mSampleRate = 44100;
audioFormat.mFormatID = kAudioFormatLinearPCM;
audioFormat.mFormatFlags = kAudioFormatFlagIsSignedInteger | kAudioFormatFlagIsPacked;
audioFormat.mFramesPerPacket = 1;
audioFormat.mChannelsPerFrame = 1;
audioFormat.mBitsPerChannel = 16;
audioFormat.mBytesPerPacket = 2; // 16 bits * 1 channel = 2 bytes
audioFormat.mBytesPerFrame = 2;  // 1 frame per packet, so same as above
// Format the microphone delivers to us: OUTPUT scope of the INPUT bus.
OSStatus status = AudioUnitSetProperty(audioUnit,
                                       kAudioUnitProperty_StreamFormat,
                                       kAudioUnitScope_Output,
                                       INPUT_BUS,
                                       &audioFormat,
                                       sizeof(audioFormat));
if (status != noErr) {
    NSLog(@"Set stream format on input bus failed: %d", (int)status);
}
// Format we feed the speaker: INPUT scope of the OUTPUT bus.
status = AudioUnitSetProperty(audioUnit,
                              kAudioUnitProperty_StreamFormat,
                              kAudioUnitScope_Input,
                              OUTPUT_BUS,
                              &audioFormat,
                              sizeof(audioFormat));
if (status != noErr) {
    NSLog(@"Set stream format on output bus failed: %d", (int)status);
}
打开输入输出端口
// Enables microphone capture on bus 1 and speaker playback on bus 0 of the
// Remote I/O unit. Both directions share the same "enabled" value (1).
-(void)setupRemoteIOUnit{
    UInt32 enabled = 1;
    // Bus 1, input scope: the microphone side of the unit.
    CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
                                    kAudioOutputUnitProperty_EnableIO,
                                    kAudioUnitScope_Input,
                                    1,
                                    &enabled,
                                    sizeof(enabled)),
               "Open input of bus 1 failed");
    // Bus 0, output scope: the speaker side of the unit.
    CheckError(AudioUnitSetProperty(augStruct->remoteIOUnit,
                                    kAudioOutputUnitProperty_EnableIO,
                                    kAudioUnitScope_Output,
                                    0,
                                    &enabled,
                                    sizeof(enabled)),
               "Open output of bus 0 failed");
}
设置回调函数
// Input (recording) callback: pulls captured microphone samples from the
// audio unit and hands them to the recorder's output blocks.
// Fixes vs. original: (1) it was mislabeled a "playback" callback — it
// services the input bus via AudioUnitRender; (2) the __weak/weakRecorder
// dance was pointless in a synchronous C callback (no retain cycle) and was
// removed; (3) the AudioUnitRender status was ignored, so the blocks could
// receive an unfilled buffer list on failure.
static OSStatus inputCallBackFun( void * inRefCon,
                                 AudioUnitRenderActionFlags * ioActionFlags,
                                 const AudioTimeStamp * inTimeStamp,
                                 UInt32 inBusNumber,
                                 UInt32 inNumberFrames,
                                 AudioBufferList * __nullable ioData)
{
    XBAudioUnitRecorder *recorder = (__bridge XBAudioUnitRecorder *)(inRefCon);
    // mData == NULL with size 0 asks AudioUnitRender to supply its own
    // internal buffer for the rendered samples.
    AudioBufferList bufferList;
    bufferList.mNumberBuffers = 1;
    bufferList.mBuffers[0].mData = NULL;
    bufferList.mBuffers[0].mDataByteSize = 0;
    OSStatus status = AudioUnitRender(recorder->audioUnit,
                                      ioActionFlags,
                                      inTimeStamp,
                                      kInputBus,
                                      inNumberFrames,
                                      &bufferList);
    if (status != noErr)
    {
        // Propagate the error instead of delivering an unfilled buffer.
        return status;
    }
    if (recorder.bl_output)
    {
        recorder.bl_output(&bufferList);
    }
    if (recorder.bl_outputFull)
    {
        recorder.bl_outputFull(recorder, ioActionFlags, inTimeStamp, inBusNumber, inNumberFrames, &bufferList);
    }
    return noErr;
}