import UIKit
import AVFoundation
protocol CaptureManagerDelegate: AnyObject {
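/// Called on the capture queue ("videoQueue"), not the main thread, for every sample buffer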
func processSampleBuffer(sampleBuffer: CMSampleBuffer, type: AVMediaType)
}
class ZGCapture: NSObject {
weak var delegate: CaptureManagerDelegate?
/// Minimum ISO sensitivity
var minISO: Float {
return currentDevice?.activeFormat.minISO ?? 0
}
/// Maximum ISO sensitivity
var maxISO: Float {
return currentDevice?.activeFormat.maxISO ?? 0
}
/// Minimum exposure duration (shutter speed), in seconds
var minExposureDuration: CMTime {
return currentDevice?.activeFormat.minExposureDuration ?? .zero
}
/// Maximum exposure duration (shutter speed), in seconds
var maxExposureDuration: CMTime {
return currentDevice?.activeFormat.maxExposureDuration ?? .zero
}
/// Minimum exposure target bias (EV)
var minExposureTargetBias: Float {
return currentDevice?.minExposureTargetBias ?? .zero
}
/// Maximum exposure target bias (EV)
var maxExposureTargetBias: Float {
return currentDevice?.maxExposureTargetBias ?? .zero
}
/// Current zoom factor
var zoom: CGFloat {
return currentDevice?.videoZoomFactor ?? 1
}
/// Maximum zoom factor
var maxZoom: CGFloat {
return currentDevice?.maxAvailableVideoZoomFactor ?? 1
}
/// Minimum zoom factor
var minZoom: CGFloat {
return currentDevice?.minAvailableVideoZoomFactor ?? 1
}
// MARK: - Exposure compensation
/// Current exposure compensation (EV)
var exposureTargetBias: Float {
return currentDevice?.exposureTargetBias ?? .zero
}
// Exposure duration recorded when the device is opened; used by setAutoMode()
private var autoExposureDuration: CMTime = .zero
private(set) var videoDevices = [AVCaptureDevice]()
private(set) var currentDevice: AVCaptureDevice?
private var captureSession = AVCaptureSession()
private var captureConnection: AVCaptureConnection?
private var currentVideoInput: AVCaptureDeviceInput?
private var videoQueue = DispatchQueue(label: "videoQueue")
private var previewLayer: AVCaptureVideoPreviewLayer?
private var currentBackDeviceIndex = 0
override init() {
super.init()
getVideoDevices()
initCapture(deviceIndex: 0)
}
func setPreview(preview: UIView) {
previewLayer?.removeFromSuperlayer()
let layer = AVCaptureVideoPreviewLayer(session: captureSession)
layer.frame = preview.bounds
layer.videoGravity = .resizeAspectFill
preview.layer.insertSublayer(layer, at: 0)
previewLayer = layer
}
func startRecordVideo() {
startCapture()
}
func stopRecordVideo() {
stopCapture()
}
// Start capture
private func startCapture() {
if captureSession.isRunning {
captureSession.stopRunning()
}
// startRunning() blocks, so call it on a background queue
videoQueue.async {
self.captureSession.startRunning()
}
}
// Stop capture
private func stopCapture() {
if captureSession.isRunning {
captureSession.stopRunning()
}
}
private func getVideoDevices() {
// Wide-angle and telephoto
var deviceTypes: [AVCaptureDevice.DeviceType] = [.builtInWideAngleCamera, .builtInTelephotoCamera]
if #available(iOS 10.2, *) {
// Dual camera (wide + telephoto)
deviceTypes.append(.builtInDualCamera)
}
if #available(iOS 11.1, *) {
// TrueDepth camera
deviceTypes.append(.builtInTrueDepthCamera)
}
if #available(iOS 13.0, *) {
// Ultra-wide, ultra-wide + wide, and ultra-wide + wide + telephoto
deviceTypes += [.builtInUltraWideCamera, .builtInDualWideCamera, .builtInTripleCamera]
}
let deviceSession = AVCaptureDevice.DiscoverySession(deviceTypes: deviceTypes, mediaType: .video, position: .back)
videoDevices = deviceSession.devices
}
/// Select or switch the back camera
func initCapture(deviceIndex: Int) {
guard deviceIndex < videoDevices.count else {
return
}
let videoDevice = videoDevices[deviceIndex]
currentBackDeviceIndex = deviceIndex
DispatchQueue.global().async {
self.initDevice(device: videoDevice)
}
}
/// Switch between the front and back cameras
func switchPosition(position: AVCaptureDevice.Position) {
if position == .front,
let device = AVCaptureDevice.DiscoverySession(deviceTypes: [.builtInWideAngleCamera], mediaType: .video, position: .front).devices.first {
// Turn the torch off before switching; the front camera has no torch
setTorchMode(mode: .off)
initDevice(device: device)
} else {
initCapture(deviceIndex: currentBackDeviceIndex)
}
}
private func initDevice(device: AVCaptureDevice) {
let isRestoration = captureSession.isRunning
stopCapture()
// Configure the input
guard let videoInput = try? AVCaptureDeviceInput(device: device) else {
return
}
// Remember the current capture device
currentDevice = device
autoExposureDuration = device.exposureDuration
// Configure the output
let videoOutput = AVCaptureVideoDataOutput()
videoOutput.setSampleBufferDelegate(self, queue: videoQueue)
// Drop frames that arrive late rather than queueing them
videoOutput.alwaysDiscardsLateVideoFrames = true
// Output pixel format: NV12 (420v, video range)
videoOutput.videoSettings = [
kCVPixelBufferPixelFormatTypeKey as String: Int(kCVPixelFormatType_420YpCbCr8BiPlanarVideoRange),
]
captureSession.beginConfiguration()
// Remove the old input and output
if let currentVideoDeviceInput = captureSession.inputs.first as? AVCaptureDeviceInput {
captureSession.removeInput(currentVideoDeviceInput)
}
if let currentVideoOutput = captureSession.outputs.first {
captureSession.removeOutput(currentVideoOutput)
}
// Add the new input and output
if captureSession.canAddInput(videoInput) {
captureSession.addInput(videoInput)
currentVideoInput = videoInput
}
if captureSession.canAddOutput(videoOutput) {
captureSession.addOutput(videoOutput)
}
captureSession.commitConfiguration()
captureConnection = videoOutput.connection(with: AVMediaType.video)
// Default video recording orientation
setupVideoOrientation(orientation: .portrait)
if isRestoration {
startCapture()
}
}
/// Intended to be called when the device rotates (e.g. from an orientation observer)
private func setupVideoOrientation(deviceOrientation: UIDeviceOrientation) {
// Map the device orientation to a video orientation
var videoOrientation: AVCaptureVideoOrientation = .portrait
switch deviceOrientation {
case .portrait:
videoOrientation = .portrait
case .portraitUpsideDown:
videoOrientation = .portraitUpsideDown
// UIDeviceOrientation and AVCaptureVideoOrientation define the landscape
// cases from opposite reference points, so the mapping is swapped
case .landscapeLeft:
videoOrientation = .landscapeRight
case .landscapeRight:
videoOrientation = .landscapeLeft
default:
break
}
self.setupVideoOrientation(orientation: videoOrientation)
}
/// Apply the video orientation to the capture connection
private func setupVideoOrientation(orientation: AVCaptureVideoOrientation) {
if (self.captureConnection?.isVideoOrientationSupported ?? false) {
captureConnection?.videoOrientation = orientation
}
}
// Setting resolution and frame rate is slow, and calling beginConfiguration again before the previous configuration finishes causes errors, so a serial queue with a cancellable work item is used
private let setResolutionAndFpsQueue = DispatchQueue(label: "serialQueue")
private var workItem: DispatchWorkItem?
func setResolutionAndFps(width: Int, height: Int, frameRate: Float64) {
workItem?.cancel()
workItem = DispatchWorkItem { [self] in
guard let captureDevice = currentDevice else { return }
let size = CGSize(width: width, height: height)
for vFormat in captureDevice.formats {
let maxRate = vFormat.videoSupportedFrameRateRanges.first?.maxFrameRate ?? 30
guard maxRate >= frameRate else { continue }
let description = vFormat.formatDescription
let dims = CMVideoFormatDescriptionGetDimensions(description)
// Match the resolution
if dims.width == Int32(size.width) && dims.height == Int32(size.height) {
// Apply the format and frame rate
captureSession.beginConfiguration()
do {
try captureDevice.lockForConfiguration()
captureDevice.activeFormat = vFormat
captureDevice.activeVideoMinFrameDuration = CMTimeMake(value: 1, timescale: Int32(frameRate))
captureDevice.activeVideoMaxFrameDuration = CMTimeMake(value: 1, timescale: Int32(frameRate))
captureDevice.unlockForConfiguration()
captureSession.commitConfiguration()
break
} catch {
captureSession.commitConfiguration()
}
}
}
print("设置分辨率:\(size) 帧率:\(frameRate)")
}
setResolutionAndFpsQueue.async(execute: workItem!)
}
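// Example (hypothetical values): request 1080p at 30 fps; nothing changes
// if no format on the current device matches the exact dimensions.
// capture.setResolutionAndFps(width: 1920, height: 1080, frameRate: 30)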
/// Restore automatic ISO/shutter (auto exposure)
func setAutoMode() {
guard let currentDevice = currentDevice,
currentDevice.isExposureModeSupported(.autoExpose),
// setExposureModeCustom below also requires .custom support
currentDevice.isExposureModeSupported(.custom)
else { return }
try? currentDevice.lockForConfiguration()
currentDevice.setExposureModeCustom(duration: autoExposureDuration, iso: currentDevice.iso) { [weak currentDevice] _ in
currentDevice?.exposureMode = .autoExpose
currentDevice?.unlockForConfiguration()
}
}
/// ISO sensitivity
func setISO(value: Float) {
guard let currentDevice = currentDevice else {
return
}
try? currentDevice.lockForConfiguration()
let clampedISO = min(max(value, minISO), maxISO) // Clamp ISO to the supported range
currentDevice.setExposureModeCustom(duration: AVCaptureDevice.currentExposureDuration, iso: clampedISO, completionHandler: nil)
currentDevice.unlockForConfiguration()
}
/// Shutter speed, in seconds
func setShutterSpeed(value: Float) {
guard let currentDevice = currentDevice else {
return
}
let maxValue = maxExposureDuration.seconds
let minValue = minExposureDuration.seconds
let value = Double(value)
var duration: CMTime
if value < minValue {
duration = minExposureDuration
} else if value > maxValue {
duration = maxExposureDuration
} else {
let scale: Int32 = 10000
let v = max(Int64(value * Double(scale)), 1)
duration = CMTimeMake(value: v, timescale: scale)
}
try? currentDevice.lockForConfiguration()
currentDevice.setExposureModeCustom(duration: duration, iso: currentDevice.iso) { [weak currentDevice] _ in
currentDevice?.exposureMode = .custom
currentDevice?.unlockForConfiguration()
}
}
// Focus mode
func setFocus(mode: AVCaptureDevice.FocusMode, point: CGPoint? = nil) {
guard let currentDevice = currentDevice,
currentDevice.isFocusModeSupported(mode)
else { return }
try? currentDevice.lockForConfiguration()
if currentDevice.isFocusPointOfInterestSupported,
let point = point,
let castPoint = previewLayer?.captureDevicePointConverted(fromLayerPoint: point) {
currentDevice.focusPointOfInterest = castPoint
}
currentDevice.focusMode = mode
currentDevice.unlockForConfiguration()
}
func setFocus(lensPosition: Float) {
guard let currentDevice = currentDevice,
currentDevice.isLockingFocusWithCustomLensPositionSupported
else { return }
try? currentDevice.lockForConfiguration()
currentDevice.setFocusModeLocked(lensPosition: lensPosition, completionHandler: nil)
currentDevice.unlockForConfiguration()
}
// Set exposure compensation (EV); valid range is minExposureTargetBias...maxExposureTargetBias (typically -8...8)
func setExposure(value: Float) {
guard let currentDevice = currentDevice,
currentDevice.isExposureModeSupported(.locked)
else { return }
try? currentDevice.lockForConfiguration()
currentDevice.setExposureTargetBias(value, completionHandler: nil)
currentDevice.unlockForConfiguration()
}
/// Set the white balance mode
func setWhiteBalance(mode: AVCaptureDevice.WhiteBalanceMode) {
guard let currentDevice = currentDevice,
currentDevice.isWhiteBalanceModeSupported(mode)
else { return }
try? currentDevice.lockForConfiguration()
currentDevice.whiteBalanceMode = mode
currentDevice.unlockForConfiguration()
}
/// Set white balance by color temperature (in kelvin)
func setWhiteBalance(temperature: Float) {
guard let currentDevice = currentDevice,
currentDevice.isWhiteBalanceModeSupported(.locked)
else { return }
let temperatureAndTintValues = AVCaptureDevice.WhiteBalanceTemperatureAndTintValues(temperature: temperature, tint: 0)
let whiteBalanceGains = currentDevice.deviceWhiteBalanceGains(for: temperatureAndTintValues)
let maxWhiteBalanceGain = currentDevice.maxWhiteBalanceGain
var fixWhiteBalanceGains = whiteBalanceGains
fixWhiteBalanceGains.redGain = max(1.0, min(maxWhiteBalanceGain, whiteBalanceGains.redGain))
fixWhiteBalanceGains.greenGain = max(1.0, min(maxWhiteBalanceGain, whiteBalanceGains.greenGain))
fixWhiteBalanceGains.blueGain = max(1.0, min(maxWhiteBalanceGain, whiteBalanceGains.blueGain))
try? currentDevice.lockForConfiguration()
currentDevice.setWhiteBalanceModeLocked(with: fixWhiteBalanceGains, completionHandler: nil)
currentDevice.unlockForConfiguration()
}
/// Zoom
func setZoom(factor: CGFloat) {
guard let currentDevice = currentDevice else { return }
let minZoom = currentDevice.minAvailableVideoZoomFactor
let maxZoom = currentDevice.maxAvailableVideoZoomFactor
let zoom = min(maxZoom, max(factor, minZoom))
try? currentDevice.lockForConfiguration()
// Ramp smoothly to the target factor rather than jumping
currentDevice.ramp(toVideoZoomFactor: zoom, withRate: 4.0)
currentDevice.unlockForConfiguration()
}
/// Torch (flashlight)
func setTorchMode(mode: AVCaptureDevice.TorchMode) {
// The front camera has no torch; setting torchMode on an unsupported device throws an exception
guard let currentDevice = currentDevice,
currentDevice.hasTorch,
currentDevice.isTorchModeSupported(mode)
else { return }
try? currentDevice.lockForConfiguration()
currentDevice.torchMode = mode
currentDevice.unlockForConfiguration()
}
}
extension ZGCapture: AVCaptureVideoDataOutputSampleBufferDelegate, AVCaptureAudioDataOutputSampleBufferDelegate {
func captureOutput(_ output: AVCaptureOutput, didOutput sampleBuffer: CMSampleBuffer, from connection: AVCaptureConnection) {
if connection == captureConnection {
// sampleBuffer is the captured frame; effects such as beauty filters are applied to it
// print("video frame captured")
delegate?.processSampleBuffer(sampleBuffer: sampleBuffer, type: .video)
} else {
print("已经采集音频--audio")
}
}
}
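A minimal usage sketch is below. The CameraViewController and its wiring are illustrative assumptions, not part of the original code; the app must also declare NSCameraUsageDescription in Info.plist and obtain camera permission before starting the session.

import UIKit
import AVFoundation

class CameraViewController: UIViewController, CaptureManagerDelegate {
let capture = ZGCapture()
override func viewDidLoad() {
super.viewDidLoad()
// Insert the preview layer at the bottom of this controller's view
capture.setPreview(preview: view)
capture.delegate = self
capture.startRecordVideo()
}
// Delivered on the capture queue for every video frame
func processSampleBuffer(sampleBuffer: CMSampleBuffer, type: AVMediaType) {
guard type == .video,
let pixelBuffer = CMSampleBufferGetImageBuffer(sampleBuffer) else { return }
// Apply filters to pixelBuffer here, or hand it to an encoder
_ = pixelBuffer
}
}

Because the delegate fires on the background videoQueue, any UI work with the frame must be dispatched back to the main queue.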