Dynamic Face Recognition on iOS with AVFoundation
2019-12-12
劉光軍_MVP
1. AVCaptureSession: the capture session
- The session coordinates the flow of data from inputs to outputs: you create AVCaptureDeviceInput and AVCaptureVideoDataOutput objects and add them to the AVCaptureSession, which manages them. They represent the input and output data and configure the ports of the abstract hardware devices.
// 1. Create the capture session
AVCaptureSession *captureSession = [[AVCaptureSession alloc] init];
self.session = captureSession;
// If the 640x480 preset is supported, use it
if ([captureSession canSetSessionPreset:AVCaptureSessionPreset640x480]) {
    captureSession.sessionPreset = AVCaptureSessionPreset640x480;
}
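One prerequisite the post does not show: since iOS 10 the app must declare NSCameraUsageDescription in Info.plist and request camera access before the session will deliver frames. A minimal sketch, run sometime before starting the session:

// Ask for camera permission (assumed step; not shown in the original post)
[AVCaptureDevice requestAccessForMediaType:AVMediaTypeVideo
                         completionHandler:^(BOOL granted) {
    if (!granted) {
        // Camera access denied; surface an error to the user
    }
}];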
2. AVCaptureDevice: represents a hardware device
- This class gives access to the phone's hardware: the cameras, the microphone, and so on.
- To change a hardware property (switching cameras, changing the flash mode, adjusting focus), the device must first be locked, and unlocked once the change is made.
(Supplement)
// 4. Remove the old input and add the new one.
//    Note: beginConfiguration()/commitConfiguration() batch changes to the
//    *session*; locking the *device* itself uses lockForConfiguration().
// 4.1 Begin the batched session change
session.beginConfiguration()
// 4.2 Remove the old input
session.removeInput(deviceIn)
// 4.3 Add the new input
session.addInput(newVideoInput)
// 4.4 Commit the change
session.commitConfiguration()
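The supplement above reconfigures the session; actually locking the device for a property change looks like the following minimal sketch (captureDevice is assumed to be the AVCaptureDevice obtained in step 2, and the focus-mode change is just an illustrative example):

NSError *lockError = nil;
// Lock the device before touching hardware-level properties
if ([captureDevice lockForConfiguration:&lockError]) {
    if ([captureDevice isFocusModeSupported:AVCaptureFocusModeContinuousAutoFocus]) {
        captureDevice.focusMode = AVCaptureFocusModeContinuousAutoFocus;
    }
    // Unlock as soon as the change is done
    [captureDevice unlockForConfiguration];
}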
// 2. Get the front-facing camera
AVCaptureDevice *captureDevice = nil;
NSArray *cameras = [AVCaptureDevice devicesWithMediaType:AVMediaTypeVideo];
for (AVCaptureDevice *camera in cameras) {
    if (camera.position == AVCaptureDevicePositionFront) {
        captureDevice = camera;
    }
}
if (!captureDevice) {
    [DLLoading DLToolTipInWindow:@"No front camera available!"];
    return;
}
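Note that devicesWithMediaType: has been deprecated since iOS 10. On newer systems the same lookup can be done in one call (a sketch, not part of the original post):

// iOS 10+: ask directly for the built-in wide-angle front camera
AVCaptureDevice *captureDevice =
    [AVCaptureDevice defaultDeviceWithDeviceType:AVCaptureDeviceTypeBuiltInWideAngleCamera
                                       mediaType:AVMediaTypeVideo
                                        position:AVCaptureDevicePositionFront];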
3. AVCaptureDeviceInput: the device input object
- An AVCaptureDeviceInput is created from an AVCaptureDevice.
- The object is added to the AVCaptureSession, which manages it; it represents an input device (commonly the microphone or a camera) and configures the ports of the abstract hardware device.
// 3. Create the input object
NSError *error = nil;
AVCaptureDeviceInput *captureInput = [AVCaptureDeviceInput deviceInputWithDevice:captureDevice error:&error];
if (error) {
    [DLLoading DLToolTipInWindow:@"Failed to create the capture input"];
    return;
}
4. AVCaptureOutput: the output object
- The output can be still images (AVCaptureStillImageOutput, deprecated since iOS 10 in favor of AVCapturePhotoOutput), movie files (AVCaptureMovieFileOutput), or, as used here, raw video frames (AVCaptureVideoDataOutput).
// 4. Create the video data output
AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
// Drop frames that arrive while the delegate is still busy
captureOutput.alwaysDiscardsLateVideoFrames = YES;
[captureOutput setSampleBufferDelegate:self queue:dispatch_queue_create("cameraQueue", NULL)];
// BGRA pixel buffers convert cleanly to CIImage for face detection
NSDictionary *videoSettings = [NSDictionary dictionaryWithObject:[NSNumber numberWithUnsignedInt:kCVPixelFormatType_32BGRA] forKey:(NSString *)kCVPixelBufferPixelFormatTypeKey];
[captureOutput setVideoSettings:videoSettings];
5. Add the input and output objects to the session
// 5. Add the input and output objects to the session
if ([captureSession canAddInput:captureInput]) {
    [captureSession addInput:captureInput];
}
if ([captureSession canAddOutput:captureOutput]) {
    [captureSession addOutput:captureOutput];
}
6. AVCaptureVideoPreviewLayer: the live preview layer
- How do the camera frames end up on the screen? By adding this layer to a UIView's layer.
// 6. Create the live preview layer
AVCaptureVideoPreviewLayer *previewlayer = [AVCaptureVideoPreviewLayer layerWithSession:captureSession];
[previewlayer connection].videoOrientation = (AVCaptureVideoOrientation)[[UIApplication sharedApplication] statusBarOrientation];
self.view.layer.masksToBounds = YES;
previewlayer.frame = CGRectMake((kMainScreenWidth - 200) / 2, 90, 200, 200);
previewlayer.videoGravity = AVLayerVideoGravityResizeAspectFill;
[self.scanView insertPreviewLayer:previewlayer];
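The post never shows the session being started (it is stopped in the delegate below), so presumably something like this runs once setup is complete. startRunning blocks until capture begins, so it is usually dispatched off the main thread:

// 7. Start the flow of data (assumed step; not shown in the original post)
dispatch_async(dispatch_get_global_queue(DISPATCH_QUEUE_PRIORITY_DEFAULT, 0), ^{
    [self.session startRunning];
});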
The face detector
#pragma mark - Face detector
- (CIDetector *)detector {
    if (_detector == nil) {
        CIContext *context = [CIContext contextWithOptions:nil];
        // High accuracy is slower but more reliable for single-frame checks
        NSDictionary *options = [NSDictionary dictionaryWithObject:CIDetectorAccuracyHigh forKey:CIDetectorAccuracy];
        _detector = [CIDetector detectorOfType:CIDetectorTypeFace context:context options:options];
    }
    return _detector;
}
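On iOS 11 and later, the Vision framework is the more modern choice for this job. A rough equivalent of the detector above (an assumption, not part of the original post; imageBuffer stands for the CVPixelBufferRef pulled from the sample buffer):

#import <Vision/Vision.h>

// Detect face rectangles with Vision (iOS 11+)
VNDetectFaceRectanglesRequest *request = [[VNDetectFaceRectanglesRequest alloc] init];
VNImageRequestHandler *handler =
    [[VNImageRequestHandler alloc] initWithCVPixelBuffer:imageBuffer options:@{}];
NSError *error = nil;
if ([handler performRequests:@[request] error:&error]) {
    VNFaceObservation *face = request.results.firstObject;
    // face.boundingBox is in normalized (0...1) coordinates
}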
Detecting a face in a frame
#pragma mark - Detect a face in the sample buffer
- (UIImage *)getFaceImageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer {
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    // Crop a centered 480x480 square out of the frame. The session preset is
    // 640x480, so the extra pixels ((640 - 480) / 2 = 80 on each side) sit on
    // the long axis: vertical in portrait, horizontal otherwise.
    CGImageRef videoImage;
    if ([[UIApplication sharedApplication] statusBarOrientation] == UIInterfaceOrientationPortrait) {
        videoImage = [temporaryContext createCGImage:ciImage fromRect:CGRectMake(0, 80, 480, 480)];
    } else {
        videoImage = [temporaryContext createCGImage:ciImage fromRect:CGRectMake(80, 0, 480, 480)];
    }
    UIImage *resultImg = [[UIImage alloc] initWithCGImage:videoImage];
    CGImageRelease(videoImage);
    // Run face detection; only accept the frame if a face with both eyes and
    // a mouth was found
    CIImage *resultCmg = [[CIImage alloc] initWithCGImage:resultImg.CGImage];
    // firstObject replaces the LINQ-category helper (linq_firstOrNil) used in
    // the original; both return nil for an empty array
    CIFaceFeature *faceFeature = (CIFaceFeature *)[self.detector featuresInImage:resultCmg].firstObject;
    if (faceFeature && faceFeature.hasLeftEyePosition && faceFeature.hasRightEyePosition && faceFeature.hasMouthPosition) {
        return resultImg;
    }
    return nil;
}
The delegate method
- Once a sample buffer arrives from the output, run the face detection on it.
#pragma mark - AVCaptureVideoDataOutputSampleBufferDelegate
- (void)captureOutput:(AVCaptureOutput *)captureOutput didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer fromConnection:(AVCaptureConnection *)connection {
    // Skip incoming frames while a previous one is still being processed
    if (!self.isDetecting) {
        self.isDetecting = YES;
        [connection setVideoOrientation:(AVCaptureVideoOrientation)[[UIApplication sharedApplication] statusBarOrientation]];
        UIImage *img = [self getFaceImageFromSampleBuffer:sampleBuffer];
        // timeoutTime is presumably advanced by a timer elsewhere (not shown);
        // it keeps the scan from firing immediately after starting
        if (img && self.timeoutTime > 2) {
            dispatch_async(dispatch_get_main_queue(), ^{
                [self.session stopRunning];
                self.isDetecting = NO;
                self.timeoutTime = 0;
                [self.scanView startAnimating];
                [self.viewModel faceScanWithImg:img];
            });
        } else {
            self.isDetecting = NO;
        }
    }
}