Version History

Version | Date |
---|---|
V1.0 | 2019.03.13 (Wednesday) |
Preface

iOS 11+ and macOS 10.13+ introduced the Vision framework, which provides face detection, object detection, object tracking, and related capabilities, all built on top of Core ML. You could say it is part of Apple's machine-learning stack. Over the next several posts we will analyze the Vision framework in detail; if you are interested, see the articles below, followed by a minimal sketch of the basic request pattern.

1. Vision Framework Detailed Analysis (Part 1) — Basic Overview (1)
2. Vision Framework Detailed Analysis (Part 2) — Vision-Based Face Detection (1)
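Before diving into the camera-based project below, here is a minimal, self-contained sketch of the request/handler pattern that the rest of the code builds on. The `detectFaces(in:)` function name and the `image` parameter are placeholders of my own, not part of the project; the Vision calls themselves (`VNDetectFaceRectanglesRequest`, `VNImageRequestHandler`) are standard API.

```swift
import UIKit
import Vision

// A minimal sketch: detect face rectangles in a still image.
// `image` is an assumed placeholder; in a real app it might come from
// the photo library or a captured frame.
func detectFaces(in image: UIImage) {
    guard let cgImage = image.cgImage else { return }

    // The request describes *what* to detect; its completion handler
    // receives VNFaceObservation results.
    let request = VNDetectFaceRectanglesRequest { request, error in
        guard let faces = request.results as? [VNFaceObservation] else { return }
        // Bounding boxes are normalized (0...1) with the origin at the bottom-left.
        for face in faces {
            print("Found a face at \(face.boundingBox)")
        }
    }

    // The handler describes *which image* to run the requests on.
    let handler = VNImageRequestHandler(cgImage: cgImage, options: [:])
    do {
        try handler.perform([request])
    } catch {
        print(error.localizedDescription)
    }
}
```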
Source Code
1. Swift
First, let's look at how the code is organized.
Next, take a look at the xib.
Finally, here is the source code.
1. FaceDetectionViewController.swift
import AVFoundation
import UIKit
import Vision
class FaceDetectionViewController: UIViewController {
var sequenceHandler = VNSequenceRequestHandler()
@IBOutlet var faceView: FaceView!
@IBOutlet var laserView: LaserView!
@IBOutlet var faceLaserLabel: UILabel!
let session = AVCaptureSession()
var previewLayer: AVCaptureVideoPreviewLayer!
let dataOutputQueue = DispatchQueue(
label: "video data queue",
qos: .userInitiated,
attributes: [],
autoreleaseFrequency: .workItem)
var faceViewHidden = false
var maxX: CGFloat = 0.0
var midY: CGFloat = 0.0
var maxY: CGFloat = 0.0
override func viewDidLoad() {
super.viewDidLoad()
configureCaptureSession()
laserView.isHidden = true
maxX = view.bounds.maxX
midY = view.bounds.midY
maxY = view.bounds.maxY
session.startRunning()
}
}
// MARK: - Gesture methods
extension FaceDetectionViewController {
@IBAction func handleTap(_ sender: UITapGestureRecognizer) {
faceView.isHidden.toggle()
laserView.isHidden.toggle()
faceViewHidden = faceView.isHidden
if faceViewHidden {
faceLaserLabel.text = "Lasers"
} else {
faceLaserLabel.text = "Face"
}
}
}
// MARK: - Video Processing methods
extension FaceDetectionViewController {
func configureCaptureSession() {
// Define the capture device we want to use
guard let camera = AVCaptureDevice.default(.builtInWideAngleCamera,
for: .video,
position: .front) else {
fatalError("No front video camera available")
}
// Connect the camera to the capture session input
do {
let cameraInput = try AVCaptureDeviceInput(device: camera)
session.addInput(cameraInput)
} catch {
fatalError(error.localizedDescription)
}
// Create the video data output
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: dataOutputQueue)
videoOutput.videoSettings = [kCVPixelBufferPixelFormatTypeKey as String: kCVPixelFormatType_32BGRA]
// Add the video output to the capture session
session.addOutput(videoOutput)
let videoConnection = videoOutput.connection(with: .video)
videoConnection?.videoOrientation = .portrait
// Configure the preview layer
previewLayer = AVCaptureVideoPreviewLayer(session: session)
previewLayer.videoGravity = .resizeAspectFill
previewLayer.frame = view.bounds
view.layer.insertSublayer(previewLayer, at: 0)
}
}
// MARK: - AVCaptureVideoDataOutputSampleBufferDelegate methods
extension FaceDetectionViewController: AVCaptureVideoDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
// 1. Get the image buffer (pixel buffer) from the sample buffer
guard let imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else {
return
}
// 2. Create a face landmarks request, with detectedFace as its completion handler
let detectFaceRequest = VNDetectFaceLandmarksRequest(completionHandler: detectedFace)
// 3. Run the request on the frame; .leftMirrored matches the mirrored front camera
do {
try sequenceHandler.perform(
[detectFaceRequest],
on: imageBuffer,
orientation: .leftMirrored)
} catch {
print(error.localizedDescription)
}
}
}
extension FaceDetectionViewController {
func convert(rect: CGRect) -> CGRect {
// 1. Convert the normalized origin to the preview layer's coordinate system
let origin = previewLayer.layerPointConverted(fromCaptureDevicePoint: rect.origin)
// 2. Convert the normalized size, treated as a point, the same way
let size = previewLayer.layerPointConverted(fromCaptureDevicePoint: rect.size.cgPoint)
// 3. Combine them into a rectangle in view coordinates
return CGRect(origin: origin, size: size.cgSize)
}
// 1. Convert a landmark point, normalized to the face bounding box, into view coordinates
func landmark(point: CGPoint, to rect: CGRect) -> CGPoint {
// 2. Scale and offset the point into the bounding box (image-normalized coordinates)
let absolute = point.absolutePoint(in: rect)
// 3. Convert that point to the preview layer's coordinate system
let converted = previewLayer.layerPointConverted(fromCaptureDevicePoint: absolute)
// 4. Return the converted point
return converted
}
func landmark(points: [CGPoint]?, to rect: CGRect) -> [CGPoint]? {
guard let points = points else {
return nil
}
return points.compactMap { landmark(point: $0, to: rect) }
}
func updateFaceView(for result: VNFaceObservation) {
defer {
DispatchQueue.main.async {
self.faceView.setNeedsDisplay()
}
}
let box = result.boundingBox
faceView.boundingBox = convert(rect: box)
guard let landmarks = result.landmarks else {
return
}
if let leftEye = landmark(
points: landmarks.leftEye?.normalizedPoints,
to: result.boundingBox) {
faceView.leftEye = leftEye
}
if let rightEye = landmark(
points: landmarks.rightEye?.normalizedPoints,
to: result.boundingBox) {
faceView.rightEye = rightEye
}
if let leftEyebrow = landmark(
points: landmarks.leftEyebrow?.normalizedPoints,
to: result.boundingBox) {
faceView.leftEyebrow = leftEyebrow
}
if let rightEyebrow = landmark(
points: landmarks.rightEyebrow?.normalizedPoints,
to: result.boundingBox) {
faceView.rightEyebrow = rightEyebrow
}
if let nose = landmark(
points: landmarks.nose?.normalizedPoints,
to: result.boundingBox) {
faceView.nose = nose
}
if let outerLips = landmark(
points: landmarks.outerLips?.normalizedPoints,
to: result.boundingBox) {
faceView.outerLips = outerLips
}
if let innerLips = landmark(
points: landmarks.innerLips?.normalizedPoints,
to: result.boundingBox) {
faceView.innerLips = innerLips
}
if let faceContour = landmark(
points: landmarks.faceContour?.normalizedPoints,
to: result.boundingBox) {
faceView.faceContour = faceContour
}
}
// 1. Update the laser view for the detected face observation
func updateLaserView(for result: VNFaceObservation) {
// 2. Remove any existing lasers
laserView.clear()
// 3. Read the face's yaw (side-to-side rotation), defaulting to 0
let yaw = result.yaw ?? 0.0
// 4. If the face isn't turned, don't draw any lasers
if yaw == 0.0 {
return
}
// 5. Collect the laser origin points
var origins: [CGPoint] = []
// 6. Use the left pupil, converted to view coordinates, as one origin
if let point = result.landmarks?.leftPupil?.normalizedPoints.first {
let origin = landmark(point: point, to: result.boundingBox)
origins.append(origin)
}
// 7. Do the same for the right pupil
if let point = result.landmarks?.rightPupil?.normalizedPoints.first {
let origin = landmark(point: point, to: result.boundingBox)
origins.append(origin)
}
// 8. Average the y coordinate of the pupil origins
let avgY = origins.map { $0.y }.reduce(0.0, +) / CGFloat(origins.count)
// 9. Aim the lasers toward the bottom or top of the screen based on eye position
let focusY = (avgY < midY) ? 0.75 * maxY : 0.25 * maxY
// 10. Aim the lasers off the left or right edge based on the sign of the yaw
let focusX = (yaw.doubleValue < 0.0) ? -100.0 : maxX + 100.0
// 11. The focus point all lasers converge on
let focus = CGPoint(x: focusX, y: focusY)
// 12. Create a laser from each pupil origin to the focus point
for origin in origins {
let laser = Laser(origin: origin, focus: focus)
laserView.add(laser: laser)
}
// 13. Redraw the laser view on the main queue
DispatchQueue.main.async {
self.laserView.setNeedsDisplay()
}
}
func detectedFace(request: VNRequest, error: Error?) {
// 1. Make sure the request returned at least one face observation
guard
let results = request.results as? [VNFaceObservation],
let result = results.first
else {
// 2. Otherwise clear the face view
faceView.clear()
return
}
if faceViewHidden {
updateLaserView(for: result)
} else {
updateFaceView(for: result)
}
}
}
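One practical note: because the view controller above drives an AVCaptureSession with the camera, the app's Info.plist must contain an NSCameraUsageDescription entry, and it is good practice to confirm authorization before calling startRunning(). A small sketch follows; the checkCameraAccess helper name is my own, not part of the original project.

```swift
import AVFoundation

// A sketch (helper name is hypothetical): verify camera authorization
// before starting the capture session.
func checkCameraAccess(completion: @escaping (Bool) -> Void) {
    switch AVCaptureDevice.authorizationStatus(for: .video) {
    case .authorized:
        completion(true)
    case .notDetermined:
        // Prompts the user; requires NSCameraUsageDescription in Info.plist.
        AVCaptureDevice.requestAccess(for: .video) { granted in
            DispatchQueue.main.async { completion(granted) }
        }
    default:
        completion(false)
    }
}
```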
2. FaceView.swift
import UIKit
import Vision
class FaceView: UIView {
var leftEye: [CGPoint] = []
var rightEye: [CGPoint] = []
var leftEyebrow: [CGPoint] = []
var rightEyebrow: [CGPoint] = []
var nose: [CGPoint] = []
var outerLips: [CGPoint] = []
var innerLips: [CGPoint] = []
var faceContour: [CGPoint] = []
var boundingBox = CGRect.zero
func clear() {
leftEye = []
rightEye = []
leftEyebrow = []
rightEyebrow = []
nose = []
outerLips = []
innerLips = []
faceContour = []
boundingBox = .zero
DispatchQueue.main.async {
self.setNeedsDisplay()
}
}
override func draw(_ rect: CGRect) {
// 1. Get the current graphics context
guard let context = UIGraphicsGetCurrentContext() else {
return
}
// 2. Save the current graphics state
context.saveGState()
// 3. Restore it when this method exits
defer {
context.restoreGState()
}
// 4. Add the face bounding box to the path
context.addRect(boundingBox)
// 5. Use red for the bounding box
UIColor.red.setStroke()
// 6. Stroke the path
context.strokePath()
// 7. Switch to white for the landmarks
UIColor.white.setStroke()
if !leftEye.isEmpty {
// 8. Add lines between the left eye points
context.addLines(between: leftEye)
// 9. Close the eye outline
context.closePath()
// 10. Stroke it
context.strokePath()
}
if !rightEye.isEmpty {
context.addLines(between: rightEye)
context.closePath()
context.strokePath()
}
if !leftEyebrow.isEmpty {
context.addLines(between: leftEyebrow)
context.strokePath()
}
if !rightEyebrow.isEmpty {
context.addLines(between: rightEyebrow)
context.strokePath()
}
if !nose.isEmpty {
context.addLines(between: nose)
context.strokePath()
}
if !outerLips.isEmpty {
context.addLines(between: outerLips)
context.closePath()
context.strokePath()
}
if !innerLips.isEmpty {
context.addLines(between: innerLips)
context.closePath()
context.strokePath()
}
if !faceContour.isEmpty {
context.addLines(between: faceContour)
context.strokePath()
}
}
}
3. LaserView.swift
import UIKit
struct Laser {
var origin: CGPoint
var focus: CGPoint
}
class LaserView: UIView {
private var lasers: [Laser] = []
func add(laser: Laser) {
lasers.append(laser)
}
func clear() {
lasers.removeAll()
DispatchQueue.main.async {
self.setNeedsDisplay()
}
}
override func draw(_ rect: CGRect) {
// 1. Get the current graphics context
guard let context = UIGraphicsGetCurrentContext() else {
return
}
// 2. Save the current graphics state
context.saveGState()
// 3. Draw each laser
for laser in lasers {
// 4. First a wide, semi-transparent white line for the glow
context.addLines(between: [laser.origin, laser.focus])
context.setStrokeColor(red: 1.0, green: 1.0, blue: 1.0, alpha: 0.5)
context.setLineWidth(4.5)
context.strokePath()
// 5. Then a thinner, more opaque red line on top
context.addLines(between: [laser.origin, laser.focus])
context.setStrokeColor(red: 1.0, green: 0.0, blue: 0.0, alpha: 0.8)
context.setLineWidth(3.0)
context.strokePath()
}
// 6. Restore the graphics state
context.restoreGState()
}
}
4. CoreGraphicsExtensions.swift
import CoreGraphics
func + (left: CGPoint, right: CGPoint) -> CGPoint {
return CGPoint(x: left.x + right.x, y: left.y + right.y)
}
func - (left: CGPoint, right: CGPoint) -> CGPoint {
return CGPoint(x: left.x - right.x, y: left.y - right.y)
}
func * (left: CGPoint, right: CGFloat) -> CGPoint {
return CGPoint(x: left.x * right, y: left.y * right)
}
extension CGSize {
var cgPoint: CGPoint {
return CGPoint(x: width, y: height)
}
}
extension CGPoint {
var cgSize: CGSize {
return CGSize(width: x, height: y)
}
func absolutePoint(in rect: CGRect) -> CGPoint {
return CGPoint(x: x * rect.size.width, y: y * rect.size.height) + rect.origin
}
}
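To make the coordinate math in these extensions concrete, here is a tiny worked example using the absolutePoint(in:) method defined above. The numbers are made up purely for illustration: a landmark point normalized to a face bounding box is scaled by the box's size and offset by its origin.

```swift
import CoreGraphics

// Illustrative values only: a face bounding box in normalized image coordinates
// and a landmark point normalized to that box.
let boundingBox = CGRect(x: 0.25, y: 0.40, width: 0.30, height: 0.20)
let normalizedLandmark = CGPoint(x: 0.5, y: 0.5)

// absolutePoint(in:) scales by the box size and adds the box origin:
// (0.5 * 0.30 + 0.25, 0.5 * 0.20 + 0.40) = (0.40, 0.50)
let absolute = normalizedLandmark.absolutePoint(in: boundingBox)
print(absolute) // (0.4, 0.5)
```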
5. UIViewExtension.swift
import UIKit
extension UIView {
@IBInspectable
var cornerRadius: CGFloat {
get {
return layer.cornerRadius
}
set {
layer.cornerRadius = newValue
}
}
}
Below is the actual result in action.
Afterword

This post mainly covered Vision-based face detection. If you found it interesting, please give it a like or a follow~~~