可参照iOS百度语音识别开发文档完成相关配置
一、接入指南
先引入编译需要的Framework:
AudioToolbox.framework
AVFoundation.framework
SystemConfiguration.framework
CoreTelephony.framework
Security.framework
libz.1.tbd
CFNetwork.framework
CoreLocation.framework
OpenGLES.framework
QuartzCore.framework
GLKit.framework
CoreGraphics.framework
CoreText.framework
注意事项:
1.注意 JSONKit.m 使用非arc方式编译,需在Build Phases-> Compile Sources->JSONKit.m 的 Compiler Flags 中添加 -fno-objc-arc, 若在xcode7上编译过程中,运行到 JSONKit文件中这一句
void *objectISA = (JK_EXPECT_F(workAroundMacOSXABIBreakingBug)) ? NULL : *((void **)objectPtr); 时程序崩溃,则说明所用的 JSONKit 版本过低,下载最新的 JSONKit 文件将其替换即可;
2.设置App Transport Security,在项目的info.plist中,添加NSAppTransportSecurity,然后在NSAppTransportSecurity 下选择 Allow Arbitrary Loads,类型Boolean,值设为YES。
3.在BuildSettings中,设置Enable Bitcode为NO;
二、使用
此处仅使用 BDVoiceRecognitionClient 来进行语音识别。
1.引入头文件
#import "BDVoiceRecognitionClient.h"
2.配置 BDVoiceRecognitionClient
// Replace "ApiKey" / "SecretKey" with the API key and secret key of the app
// you registered on the Baidu developer console.
[[BDVoiceRecognitionClient sharedInstance] setApiKey:@"ApiKey" withSecretKey:@"SecretKey"];
// Recognize Chinese speech.
[[BDVoiceRecognitionClient sharedInstance] setLanguage:EVoiceRecognitionLanguageChinese];
// Request NLU (natural-language-understanding) results from the server.
[[BDVoiceRecognitionClient sharedInstance] setResourceType:RESOURCE_TYPE_NLU];
// Use the general web-search recognition property.
[[BDVoiceRecognitionClient sharedInstance] setPropertyList:@[@(EVoiceRecognitionPropertyWeb)]];
// Start recognition only when the recorder is available (e.g. mic permission granted).
if ([[BDVoiceRecognitionClient sharedInstance] isCanRecorder]) {
    // self must conform to MVoiceRecognitionClientDelegate to receive callbacks.
    [[BDVoiceRecognitionClient sharedInstance] startVoiceRecognition:self];
}
3.遵守MVoiceRecognitionClientDelegate协议,实现以下代理方法
根据回调返回的不同状态值,可自行对结果进行处理
// MVoiceRecognitionClientDelegate callback: reports the recognizer's working
// status. aStatus carries an EVoiceRecognitionClientWorkStatus value; when the
// status is Finish, aObj holds the recognition result returned by the server.
- (void)VoiceRecognitionClientWorkStatus:(int)aStatus obj:(id)aObj {
    if (aStatus == EVoiceRecognitionClientWorkStatusStartWorkIng) {
        NSLog(@"开始录音");
    } else if (aStatus == EVoiceRecognitionClientWorkStatusStart) {
        NSLog(@"检测到用户开始说话");
    } else if (aStatus == EVoiceRecognitionClientWorkStatusEnd) {
        NSLog(@"结束录音");
    } else if (aStatus == EVoiceRecognitionClientWorkStatusFinish) {
        // The recognition result arrives here once the server responds.
        NSLog(@"语音识别功能完成,服务器返回正确结果");
        // -JSONString is the JSONKit category method; serializes the result object.
        NSString *resultString = [aObj JSONString];
        NSLog(@"识别结果: %@", resultString);
    }
    // Other status values are intentionally ignored.
}
// MVoiceRecognitionClientDelegate callback: reports recognizer errors.
// aStatus identifies the error class; aSubStatus carries the detailed code
// (unused here beyond the signature required by the protocol).
- (void)VoiceRecognitionClientErrorStatus:(int)aStatus subStatus:(int)aSubStatus {
    // Map each known error class to its log message; unknown values log
    // nothing, matching the original empty default branch.
    NSDictionary *messageByStatus = @{
        @(EVoiceRecognitionClientErrorStatusClassVDP) : @"语音数据处理过程出错",
        @(EVoiceRecognitionClientErrorStatusUnKnow) : @"未知错误(异常)",
        @(EVoiceRecognitionClientErrorStatusNoSpeech) : @"用户未说话",
        @(EVoiceRecognitionClientErrorStatusShort) : @"用户说话声音太短",
        @(EVoiceRecognitionClientErrorStatusException) : @"语音前端库检测异常",
        @(EVoiceRecognitionClientErrorStatusClassRecord) : @"录音出错",
        @(EVoiceRecognitionClientErrorStatusClassLocalNet) : @"本地网络联接出错",
        @(EVoiceRecognitionClientErrorStatusClassServerNet) : @"服务器返回网络错误",
    };
    NSString *message = messageByStatus[@(aStatus)];
    if (message) {
        NSLog(@"%@", message);
    }
}
// MVoiceRecognitionClientDelegate callback: reports network activity while a
// recognition request is in flight. Other status values are ignored.
- (void)VoiceRecognitionClientNetWorkStatus:(int)aStatus {
    if (aStatus == EVoiceRecognitionClientNetWorkStatusStart) {
        NSLog(@"网络工作开始");
    } else if (aStatus == EVoiceRecognitionClientNetWorkStatusEnd) {
        NSLog(@"网络工作完成");
    }
}