这篇文章的人脸识别与人脸检测并不能进行高层次、高深度的操作,只能用来识别摄像头拍摄时是否有人脸存在,以及后期对人脸位置的检测。它并不能判断某个人在图上出现几次,也不能判断是否是同一个人。本文仅用于学习系统框架中人脸检测功能的开发方法。
1:对于人脸的识别
首先预加载一些输入,输出的属性
1.1 加载摄像头
// Lazy getter for the camera capture device.
// Acquires the configuration lock before mutating the device; each capability
// is feature-checked first because hardware support varies between devices.
- (AVCaptureDevice *)cameraDevice {
    if (_cameraDevice == nil) {
        _cameraDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
        NSError *error = nil;
        if ([_cameraDevice lockForConfiguration:&error]) {
            // Smooth autofocus: slower but steadier focus transitions (good for video).
            if ([_cameraDevice isSmoothAutoFocusSupported]) {
                _cameraDevice.smoothAutoFocusEnabled = YES;
            }
            // Continuously refocus as the scene changes.
            if ([_cameraDevice isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
                _cameraDevice.focusMode = AVCaptureFocusModeContinuousAutoFocus;
            }
            // Continuously adjust exposure.
            if ([_cameraDevice isExposureModeSupported:AVCaptureExposureModeContinuousAutoExposure]) {
                _cameraDevice.exposureMode = AVCaptureExposureModeContinuousAutoExposure;
            }
            // Continuously adjust white balance.
            if ([_cameraDevice isWhiteBalanceModeSupported:AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance]) {
                _cameraDevice.whiteBalanceMode = AVCaptureWhiteBalanceModeContinuousAutoWhiteBalance;
            }
            [_cameraDevice unlockForConfiguration];
        } else {
            // Fix: the original silently discarded a failed configuration lock.
            NSLog(@"Failed to lock camera for configuration: %@", error);
        }
    }
    return _cameraDevice;
}
1.2 加载 核心组件AVCaptureSession
// Lazy getter for the capture session: wires up the camera input, the
// frame-grabbing video-data output and the face-metadata output.
- (AVCaptureSession *)session {
    if (_session == nil) {
        _session = [[AVCaptureSession alloc] init];
        _session.sessionPreset = AVCaptureSessionPresetHigh;
        // The simulator has no camera, so input creation can fail.
        // Fix: check the returned object, not the error pointer — the error
        // out-parameter is not guaranteed meaningful on success.
        NSError *error = nil;
        AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:self.cameraDevice error:&error];
        if (input == nil) {
            NSLog(@"没有摄像设备");
        } else {
            if ([_session canAddInput:input]) {
                [_session addInput:input];
                // Zoom in (capped at 2x) so faces fill more of the frame.
                // Fix: only mutate the device when the lock succeeds, and
                // release the lock afterwards — the original locked the device
                // and never unlocked it.
                NSError *zoomError = nil;
                if ([_cameraDevice lockForConfiguration:&zoomError]) {
                    if (_cameraDevice.activeFormat.videoMaxZoomFactor > 2) {
                        _cameraDevice.videoZoomFactor = 2;
                    } else {
                        _cameraDevice.videoZoomFactor = _cameraDevice.activeFormat.videoMaxZoomFactor;
                    }
                    [_cameraDevice unlockForConfiguration];
                }
            }
            if ([_session canAddOutput:self.videoDataOutput]) {
                [_session addOutput:self.videoDataOutput];
            }
            if ([_session canAddOutput:self.metadataOutput]) {
                [_session addOutput:self.metadataOutput];
            }
        }
    }
    return _session;
}
1.3 加载其他输出AVCaptureVideoDataOutput,AVCaptureMetadataOutput
// Lazy getter for the metadata output used for face detection.
// Metadata callbacks are delivered to self on the serial capture queue.
- (AVCaptureMetadataOutput *)metadataOutput {
    if (!_metadataOutput) {
        AVCaptureMetadataOutput *output = [[AVCaptureMetadataOutput alloc] init];
        [output setMetadataObjectsDelegate:self queue:self.queue];
        _metadataOutput = output;
    }
    return _metadataOutput;
}
// Lazy getter for the frame-grabbing output.
// Late frames are dropped to keep the pipeline responsive; the pixel format
// is taken from self.outPutSetting.
- (AVCaptureVideoDataOutput *)videoDataOutput {
    if (!_videoDataOutput) {
        AVCaptureVideoDataOutput *output = [[AVCaptureVideoDataOutput alloc] init];
        output.alwaysDiscardsLateVideoFrames = YES;
        output.videoSettings = @{(id)kCVPixelBufferPixelFormatTypeKey : self.outPutSetting};
        _videoDataOutput = output;
    }
    return _videoDataOutput;
}
1.4 session的开始与停止
[self.session startRunning];
[self.session stopRunning];
1.5 温馨提示:请加入对摄像头权限的判断
2 人脸识别(系统)AVCaptureMetadataOutputObjectsDelegate 主要是这个代理方法,大家可以去看看这个API
2.1 这个方法用于检测摄像头里是否存在人脸
// AVCaptureMetadataOutputObjectsDelegate: called when metadata objects
// (faces, for this setup) are detected in the capture stream.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputMetadataObjects:(NSArray *)metadataObjects fromConnection:(AVCaptureConnection *)connection {
    // Fix: the original cast the object to AVMetadataMachineReadableCodeObject,
    // which is the barcode class — a plain AVMetadataObject is correct here.
    AVMetadataObject *metadataObject = metadataObjects.firstObject;  // nil-safe, replaces count check
    if (metadataObject == nil) {
        return;
    }
    // Convert from device/metadata coordinates into preview-layer coordinates.
    AVMetadataObject *transformedMetadataObject = [self.previewLayer transformedMetadataObjectForMetadataObject:metadataObject];
    CGRect faceRegion = transformedMetadataObject.bounds;
    // Fix: AVMetadataObjectType is an NSString constant; compare with
    // -isEqualToString: rather than pointer equality (==).
    if ([metadataObject.type isEqualToString:AVMetadataObjectTypeFace]) {
        NSLog(@"是否包含头像:%d, facePathRect: %@, faceRegion: %@",CGRectContainsRect(self.faceDetectionFrame, faceRegion),NSStringFromCGRect(self.faceDetectionFrame),NSStringFromCGRect(faceRegion));
        NSLog(@"%@", metadataObjects);
        // Attach the sample-buffer delegate lazily so frames are only captured
        // once a face has actually been seen.
        if (!self.videoDataOutput.sampleBufferDelegate) {
            [self.videoDataOutput setSampleBufferDelegate:self queue:self.queue];
        }
    }
}
2.2 从输出的数据流捕捉单一的图像帧,回调频率很快。AVCaptureVideoDataOutputSampleBufferDelegate
// AVCaptureVideoDataOutputSampleBufferDelegate: receives every captured frame.
// This fires very frequently, so the delegate is detached after one frame.
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    // Fix: the kCVPixelFormatType_* constants are unsigned (OSType); the
    // original boxed them with numberWithInt:, truncating to signed int.
    // @() boxes with the constant's real type for the value comparison.
    BOOL supportedFormat =
        [self.outPutSetting isEqualToNumber:@(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange)] ||
        [self.outPutSetting isEqualToNumber:@(kCVPixelFormatType_420YpCbCr8BiPlanarFullRange)];
    if (!supportedFormat) {
        NSLog(@"输出格式不支持");
        return;
    }
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    if ([captureOutput isEqual:self.videoDataOutput]) {
        // Project category on UIImage converting the pixel buffer to a UIImage.
        UIImage *image = [UIImage getImageStream:imageBuffer];
        // Detach the delegate after grabbing one frame to stop this
        // high-frequency callback from firing repeatedly.
        if (self.videoDataOutput.sampleBufferDelegate) {
            [self.videoDataOutput setSampleBufferDelegate:nil queue:self.queue];
        }
    }
}
2.3 至此,我们已经从摄像头中检测到了人脸的图片,但还无法进行人脸特征的检测,因此接下来要用到 CIImage / CIDetector
3 .人脸检测
3.1 由于摄像头获取的图片比较大,我们要将图片进行压缩,最好是保证图片控件的大小跟图片压缩之后的一致
/// Scales _IDImage to fill targetSize (aspect-fill) and crops the overflow,
/// centering the image inside the target rect.
/// @param targetSize The desired output size in points; should match the
///        size of the image view that will display the result.
/// @return The scaled-and-cropped image, or nil if no source image is set.
- (UIImage *)imageByScalingAndCroppingForSize:(CGSize)targetSize {
    UIImage *sourceImage = _IDImage;
    if (sourceImage == nil) {
        // Fix: drawing a nil image would silently produce a blank bitmap.
        return nil;
    }
    CGSize imageSize = sourceImage.size;
    CGFloat width = imageSize.width;
    CGFloat height = imageSize.height;
    CGFloat targetWidth = targetSize.width;
    CGFloat targetHeight = targetSize.height;
    CGFloat scaledWidth = targetWidth;
    CGFloat scaledHeight = targetHeight;
    CGPoint thumbnailPoint = CGPointMake(0.0, 0.0);
    if (!CGSizeEqualToSize(imageSize, targetSize)) {
        CGFloat widthFactor = targetWidth / width;
        CGFloat heightFactor = targetHeight / height;
        // Use the larger factor so the image fills the target completely
        // (aspect-fill); the overflowing dimension is cropped below.
        CGFloat scaleFactor = (widthFactor > heightFactor) ? widthFactor : heightFactor;
        scaledWidth = width * scaleFactor;
        scaledHeight = height * scaleFactor;
        // Center the overflowing dimension inside the target rect.
        if (widthFactor > heightFactor) {
            thumbnailPoint.y = (targetHeight - scaledHeight) * 0.5;
        } else if (widthFactor < heightFactor) {
            thumbnailPoint.x = (targetWidth - scaledWidth) * 0.5;
        }
    }
    // Fix: UIGraphicsBeginImageContext renders at 1x; pass scale 0.0 so the
    // context uses the device's screen scale (sharp on retina displays).
    // The context's bounds perform the crop.
    UIGraphicsBeginImageContextWithOptions(targetSize, NO, 0.0);
    CGRect thumbnailRect = CGRectZero;
    thumbnailRect.origin = thumbnailPoint;
    thumbnailRect.size.width = scaledWidth;
    thumbnailRect.size.height = scaledHeight;
    [sourceImage drawInRect:thumbnailRect];
    UIImage *newImage = UIGraphicsGetImageFromCurrentImageContext();
    if (newImage == nil) {
        NSLog(@"could not scale image");
    }
    // Pop the context to get back to the default.
    UIGraphicsEndImageContext();
    return newImage;
}
3.2 人脸检测 featuresInImage,CIFaceFeature
// Detects faces in the image currently shown by _IDImageView using a Core
// Image CIDetector, and overlays red-bordered views marking each face's
// bounds, eyes and mouth.
// NOTE(review): CIFaceFeature coordinates use Core Image's bottom-left
// origin; the vertical flip applied to resultView at the end converts the
// whole overlay to UIKit's top-left origin in one step.
// NOTE(review): the overlay frame is hard-coded and must stay in sync with
// _IDImageView.frame; each call adds a new overlay view without removing
// previous ones — presumably this is invoked only once. TODO confirm.
-(void)recognitionFaces{
CIContext * context = [CIContext contextWithOptions:nil];
UIImage * imageInput = [_IDImageView image];
CIImage * image = [CIImage imageWithCGImage:imageInput.CGImage];
// High-accuracy detection (slower than CIDetectorAccuracyLow).
NSDictionary * param = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy];
CIDetector * faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:context options:param];
NSArray * detectResult = [faceDetector featuresInImage:image];// the core detection call
UIView * resultView = [[UIView alloc] initWithFrame:CGRectMake(10, 135.5, 355, 238)];// must match _IDImageView.frame
[self.view addSubview:resultView];
for (CIFaceFeature * faceFeature in detectResult) {
// one CIFaceFeature per detected face
UIView *faceView = [[UIView alloc] initWithFrame:faceFeature.bounds];// face bounding box
faceView.layer.borderColor = [UIColor redColor].CGColor;
faceView.layer.borderWidth = 1;
[resultView addSubview:faceView];
if (faceFeature.hasLeftEyePosition) {
UIView * leftEyeView = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 5, 5)];// left eye marker
[leftEyeView setCenter:faceFeature.leftEyePosition];
leftEyeView.layer.borderWidth = 1;
leftEyeView.layer.borderColor = [UIColor redColor].CGColor;
[resultView addSubview:leftEyeView];
}
if (faceFeature.hasRightEyePosition) {
// right eye marker
UIView * rightEyeView = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 5, 5)];
[rightEyeView setCenter:faceFeature.rightEyePosition];
rightEyeView.layer.borderWidth = 1;
rightEyeView.layer.borderColor = [UIColor redColor].CGColor;
[resultView addSubview:rightEyeView];
}
if (faceFeature.hasMouthPosition) {
// mouth marker
UIView * mouthView = [[UIView alloc] initWithFrame:CGRectMake(0, 0, 10, 5)];
[mouthView setCenter:faceFeature.mouthPosition];
mouthView.layer.borderWidth = 1;
mouthView.layer.borderColor = [UIColor redColor].CGColor;
[resultView addSubview:mouthView];
}
}
// Flip vertically: converts Core Image (bottom-left origin) coordinates of
// all the subviews into UIKit (top-left origin) screen coordinates.
[resultView setTransform:CGAffineTransformMakeScale(1, -1)];
}
这样我们就结束了,附上demo图一张