First, let's introduce the relevant classes
AVCaptureSession
AVCaptureSession is the object that manages capture activity and coordinates the flow of data from input devices to capture outputs.
To perform real-time or offline capture, you instantiate an AVCaptureSession object and add appropriate inputs (such as AVCaptureDeviceInput) and outputs (such as AVCaptureMovieFileOutput). The following code fragment illustrates how to configure a capture device to record audio:
AVCaptureSession *captureSession = [[AVCaptureSession alloc] init];
AVCaptureDevice *audioCaptureDevice = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeAudio];
NSError *error = nil;
AVCaptureDeviceInput *audioInput = [AVCaptureDeviceInput deviceInputWithDevice:audioCaptureDevice error:&error];
if (audioInput) {
    [captureSession addInput:audioInput];
} else {
    // Handle the failure.
}
AVCaptureDevice
A device that provides input (such as audio or video) for a capture session and offers controls for hardware-specific capture features.
// Method 1
// Choose the back dual camera if available, otherwise default to a wide angle camera.
AVCaptureDevice *videoDevice = [AVCaptureDevice defaultDeviceWithDeviceType:AVCaptureDeviceTypeBuiltInDuoCamera mediaType:AVMediaTypeVideo position:AVCaptureDevicePositionBack];
if (!videoDevice) {
    // If the back dual camera is not available, default to the back wide angle camera.
    videoDevice = [AVCaptureDevice defaultDeviceWithDeviceType:AVCaptureDeviceTypeBuiltInWideAngleCamera mediaType:AVMediaTypeVideo position:AVCaptureDevicePositionBack];
    // In some cases where users break their phones, the back wide angle camera is not available.
    // In this case, we should default to the front wide angle camera.
    if (!videoDevice) {
        videoDevice = [AVCaptureDevice defaultDeviceWithDeviceType:AVCaptureDeviceTypeBuiltInWideAngleCamera mediaType:AVMediaTypeVideo position:AVCaptureDevicePositionFront];
    }
}
// New method 1 (iOS 10+): discover devices through AVCaptureDeviceDiscoverySession
+ (instancetype)discoverySessionWithDeviceTypes:(NSArray<AVCaptureDeviceType> *)deviceTypes mediaType:(NSString *)mediaType position:(AVCaptureDevicePosition)position;
// Method 2
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
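New method 1 returns an AVCaptureDeviceDiscoverySession rather than a device directly; here is a minimal usage sketch (iOS 10+), where the device-type array and the fallback handling are illustrative assumptions, not part of the original snippet:
// A minimal sketch: find the back wide angle camera via a discovery session.
AVCaptureDeviceDiscoverySession *discoverySession =
    [AVCaptureDeviceDiscoverySession discoverySessionWithDeviceTypes:@[AVCaptureDeviceTypeBuiltInWideAngleCamera]
                                                           mediaType:AVMediaTypeVideo
                                                            position:AVCaptureDevicePositionBack];
// `devices` lists every matching device; take the first match, if any.
AVCaptureDevice *videoDevice = discoverySession.devices.firstObject;
if (!videoDevice) {
    // No matching camera -- fall back to another device type or report an error.
}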
AVCaptureInput
The abstract superclass for objects that provide input data to a capture session.
To associate an AVCaptureInput object with a session, call addInput: on the session.
An AVCaptureInput object has one or more ports (instances of AVCaptureInputPort), one for each data stream it can produce. For example, an AVCaptureDevice object that presents one video data stream has one port.
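As a quick illustration, you can inspect the ports an input exposes; this sketch assumes a videoDeviceInput like the one created in the AVCaptureDeviceInput section below:
// A minimal sketch: enumerate the ports (one per data stream) of an input.
// Assumes `videoDeviceInput` is an AVCaptureDeviceInput created elsewhere.
for (AVCaptureInputPort *port in videoDeviceInput.ports) {
    NSLog(@"Port media type: %@, enabled: %d", port.mediaType, port.enabled);
}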
AVCaptureDeviceInput
AVCaptureDeviceInput is a capture input that provides media from a capture device to a capture session (AVCaptureSession). It is a concrete subclass of AVCaptureInput that you use to capture data from an AVCaptureDevice object.
AVCaptureDeviceInput *videoDeviceInput = [AVCaptureDeviceInput deviceInputWithDevice:videoDevice error:&error];
if (!videoDeviceInput) {
    NSLog(@"Could not create video device input: %@", error);
    return;
}
if ([self.session canAddInput:videoDeviceInput]) {
    [self.session addInput:videoDeviceInput];
}
AVCaptureOutput
AVCaptureOutput is the abstract base class that describes the output destinations of an AVCaptureSession object.
AVCaptureOutput provides an abstract interface for connecting capture output destinations (such as files and video previews) to a capture session (an instance of AVCaptureSession). A capture output can have multiple connections, represented by AVCaptureConnection objects, one for each media stream it receives from a capture input (an instance of AVCaptureInput). A capture output has no connections when it is first created; when you add it to a capture session, connections are created that map media data from the session's inputs to its outputs.
You add a concrete AVCaptureOutput instance to a capture session with addOutput:.
// Photo output
AVCapturePhotoOutput *photoOutput = [[AVCapturePhotoOutput alloc] init];
if ([self.session canAddOutput:photoOutput]) {
    [self.session addOutput:photoOutput];
}
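Adding the photo output only wires it into the session; a capture is triggered later. A minimal sketch (iOS 10+), assuming self adopts AVCapturePhotoCaptureDelegate:
// A minimal sketch: trigger a still capture on the photo output added above.
// Assumes `self` conforms to AVCapturePhotoCaptureDelegate to receive the result.
AVCapturePhotoSettings *settings = [AVCapturePhotoSettings photoSettings];
[photoOutput capturePhotoWithSettings:settings delegate:self];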
// Movie file output
AVCaptureMovieFileOutput *movieFileOutput = [[AVCaptureMovieFileOutput alloc] init];
if ([self.session canAddOutput:movieFileOutput]) {
    [self.session beginConfiguration];
    [self.session addOutput:movieFileOutput];
    self.session.sessionPreset = AVCaptureSessionPresetHigh;
    AVCaptureConnection *connection = [movieFileOutput connectionWithMediaType:AVMediaTypeVideo];
    if (connection.isVideoStabilizationSupported) {
        connection.preferredVideoStabilizationMode = AVCaptureVideoStabilizationModeAuto;
    }
    [self.session commitConfiguration];
}
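Similarly, the movie output starts writing only once you hand it a file URL. A small sketch, assuming self adopts AVCaptureFileOutputRecordingDelegate; the temporary-file path is an illustrative choice:
// A minimal sketch: start recording to a temporary file.
// Assumes `self` conforms to AVCaptureFileOutputRecordingDelegate.
NSURL *outputURL = [NSURL fileURLWithPath:
    [NSTemporaryDirectory() stringByAppendingPathComponent:@"movie.mov"]];
[movieFileOutput startRecordingToOutputFileURL:outputURL recordingDelegate:self];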
// Metadata output
AVCaptureMetadataOutput *metadataOutput = [[AVCaptureMetadataOutput alloc] init];
[_session addOutput:metadataOutput];
dispatch_queue_t queue = dispatch_queue_create("myQueue", NULL);
[metadataOutput setMetadataObjectsDelegate:self queue:queue];
AVCaptureVideoPreviewLayer
A Core Animation layer that displays video as it is being captured.
AVCaptureVideoPreviewLayer is a subclass of CALayer that you use to display video as it is captured by an input device.
You use this preview layer in conjunction with a capture session, as shown in the following code fragment:
AVCaptureSession *captureSession = <#Get a capture session#>;
AVCaptureVideoPreviewLayer *previewLayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
UIView *aView = <#The view in which to present the layer#>;
previewLayer.frame = aView.bounds; // Assume you want the preview layer to fill the view.
[aView.layer addSublayer:previewLayer];
QR Code Scanning
Main code:
NSError *error = nil;
// Initialize the session
_session = [[AVCaptureSession alloc] init];
// Set the session's output quality preset
_session.sessionPreset = AVCaptureSessionPresetMedium;
// Get the capture device we need
AVCaptureDevice *device = [AVCaptureDevice defaultDeviceWithMediaType:AVMediaTypeVideo];
AVCaptureDeviceInput *input = [AVCaptureDeviceInput deviceInputWithDevice:device error:&error];
if (!input) {
    // Handle the error appropriately.
    return;
}
[_session addInput:input];
dispatch_queue_t queue = dispatch_queue_create("myQueue", NULL);
// Metadata output for reading machine-readable codes
AVCaptureMetadataOutput *metadataOutput = [[AVCaptureMetadataOutput alloc] init];
[_session addOutput:metadataOutput];
[metadataOutput setMetadataObjectsDelegate:self queue:queue];
// Restrict the metadata type to QR codes (AVMetadataObjectTypeQRCode);
// other types are available -- see the header file.
[metadataOutput setMetadataObjectTypes:@[AVMetadataObjectTypeQRCode]];
// Start the session running to start the flow of data
[_session startRunning];
// Create the preview layer to show the live camera feed
self.previewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:_session];
self.previewLayer.frame = CGRectMake(0, 64, 300, 304);
self.previewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.view.layer addSublayer:self.previewLayer];
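One practical note: the camera delivers no frames until the user has granted access (and, from iOS 10 on, the app's Info.plist must contain an NSCameraUsageDescription entry). A minimal sketch of the standard authorization check you would run before the setup code above:
// A minimal sketch: check (or request) camera permission before
// configuring the session; without it, startRunning shows nothing.
AVAuthorizationStatus status = [AVCaptureDevice authorizationStatusForMediaType:AVMediaTypeVideo];
if (status == AVAuthorizationStatusNotDetermined) {
    [AVCaptureDevice requestAccessForMediaType:AVMediaTypeVideo completionHandler:^(BOOL granted) {
        if (granted) {
            // Safe to build and start the capture session now.
        }
    }];
} else if (status != AVAuthorizationStatusAuthorized) {
    // Denied or restricted -- prompt the user to enable camera access in Settings.
}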
Delegate: <AVCaptureMetadataOutputObjectsDelegate>
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputMetadataObjects:(NSArray *)metadataObjects fromConnection:(AVCaptureConnection *)connection {
    if (metadataObjects.count > 0) {
        // Get the scanned data; the last object is the most recently scanned one.
        AVMetadataMachineReadableCodeObject *object = [metadataObjects lastObject];
        // The decoded result is object.stringValue.
        dispatch_async(dispatch_get_main_queue(), ^{
            _label.text = object.stringValue;
        });
    } else {
        dispatch_async(dispatch_get_main_queue(), ^{
            _label.text = @"No data scanned";
        });
    }
}
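While the session is running, this callback keeps firing for as long as a code is in view, so in practice you usually stop the session after the first successful read. A small sketch of that pattern inside the delegate method, using the _session variable from the setup code (call startRunning again to resume scanning):
// A minimal sketch: stop the session after a successful read so the
// delegate is not invoked repeatedly for the same QR code.
AVMetadataMachineReadableCodeObject *object = [metadataObjects lastObject];
if (object.stringValue) {
    [_session stopRunning];
    // Handle object.stringValue, e.g. open a URL or push a result screen.
}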