A First Try at Face Detection with Core Image

2017-09-06  Claire_wu

1. Face detection on a static image
Face detection on a static image is fairly simple; the inline comments below explain each step.

- (void)faceDetectWithImage:(UIImage *)image {
    
    // Remove any face boxes drawn for a previous image
    for (UIView *view in _imageView.subviews) {
        [view removeFromSuperview];
    }
    
    // Detection accuracy: choose between CIDetectorAccuracyHigh (more accurate, slower)
    // and CIDetectorAccuracyLow (faster, less accurate). High accuracy is used here.
    NSDictionary *opts = [NSDictionary dictionaryWithObject:
                          CIDetectorAccuracyHigh forKey:CIDetectorAccuracy];
    // Convert the UIImage to a CIImage
    CIImage *faceImage = [CIImage imageWithCGImage:image.CGImage];
    CIDetector *faceDetector = [CIDetector detectorOfType:CIDetectorTypeFace context:nil options:opts];
    // Detect faces; the result is an array of CIFaceFeature objects
    NSArray *features = [faceDetector featuresInImage:faceImage];
    // Size of the input image
    CGSize inputImageSize = [faceImage extent].size;
    // Flip the coordinate system vertically (Core Image uses a bottom-left origin, UIKit a top-left origin)
    CGAffineTransform transform = CGAffineTransformScale(CGAffineTransformIdentity, 1, -1);
    // Shift back up by the image height so the flipped coordinates line up
    transform = CGAffineTransformTranslate(transform, 0, -inputImageSize.height);
    
    // Iterate over every detected face
    for (CIFaceFeature *faceFeature in features) {
        // Convert the face bounds into UIKit coordinates
        CGRect faceViewBounds = CGRectApplyAffineTransform(faceFeature.bounds, transform);
        CGSize viewSize = _imageView.bounds.size;
        CGFloat scale = MIN(viewSize.width / inputImageSize.width,
                            viewSize.height / inputImageSize.height);
        CGFloat offsetX = (viewSize.width - inputImageSize.width * scale) / 2;
        CGFloat offsetY = (viewSize.height - inputImageSize.height * scale) / 2;
        // Scale to match the image view (aspect-fit)
        CGAffineTransform scaleTransform = CGAffineTransformMakeScale(scale, scale);
        // Apply the scale, then center the rect inside the image view
        faceViewBounds = CGRectApplyAffineTransform(faceViewBounds, scaleTransform);
        faceViewBounds.origin.x += offsetX;
        faceViewBounds.origin.y += offsetY;
    
        // Draw a box around the face
        UIView *faceView = [[UIView alloc] initWithFrame:faceViewBounds];
        faceView.layer.borderWidth = 2;
        faceView.layer.borderColor = [[UIColor redColor] CGColor];
        [_imageView addSubview:faceView];
    
        // Left eye position, if detected
        if (faceFeature.hasLeftEyePosition) {}
        // Right eye position, if detected
        if (faceFeature.hasRightEyePosition) {}
        // Mouth position, if detected
        if (faceFeature.hasMouthPosition) {}
    }
    NSLog(@"Detected %lu face(s)", (unsigned long)features.count);
}
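
To try the method out, it just needs an image set on the image view and a call after layout is complete. The following is a minimal usage sketch; the image name "faceDemo" and the placement in viewDidAppear: are assumptions for illustration, not part of the original post.

- (void)viewDidAppear:(BOOL)animated {
    [super viewDidAppear:animated];
    UIImage *testImage = [UIImage imageNamed:@"faceDemo"]; // hypothetical bundled test image
    _imageView.contentMode = UIViewContentModeScaleAspectFit; // matches the aspect-fit math in faceDetectWithImage:
    _imageView.image = testImage;
    [self faceDetectWithImage:testImage]; // _imageView.bounds is final here, so the scaling math works
}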

2. Real-time face detection with the camera, in the following steps:
(1) Initialize the camera

#pragma mark - Initialize the camera
- (void)getCameraSession
{
    // Create the capture session
    _captureSession = [[AVCaptureSession alloc] init];
    if ([_captureSession canSetSessionPreset:AVCaptureSessionPreset1280x720]) { // set the resolution
        _captureSession.sessionPreset = AVCaptureSessionPreset1280x720;
    }
    // Get the input device (front camera)
    AVCaptureDevice *captureDevice = [self getCameraDeviceWithPosition:AVCaptureDevicePositionFront];
    if (!captureDevice) {
        NSLog(@"Failed to get the front camera.");
        return;
    }
    
    NSError *error = nil;
    // Create the device input object used to feed camera data into the session
    _captureDeviceInput = [[AVCaptureDeviceInput alloc] initWithDevice:captureDevice error:&error];
    if (error) {
        NSLog(@"Failed to create the device input, reason: %@", error.localizedDescription);
        return;
    }
    
    // Create the still image output used to capture photos
    _captureStillImageOutput = [[AVCaptureStillImageOutput alloc] init];
    NSDictionary *outputSettings = @{AVVideoCodecKey:AVVideoCodecJPEG};
    [_captureStillImageOutput setOutputSettings:outputSettings]; // output settings
    
    // Add the device input to the session
    if ([_captureSession canAddInput:_captureDeviceInput]) {
        [_captureSession addInput:_captureDeviceInput];
    }
    
    // Add the still image output to the session
    if ([_captureSession canAddOutput:_captureStillImageOutput]) {
        [_captureSession addOutput:_captureStillImageOutput];
    }
    
    // Create the video preview layer so the camera feed is shown live
    _captureVideoPreviewLayer = [[AVCaptureVideoPreviewLayer alloc] initWithSession:self.captureSession];
    
    CALayer *layer = self.videoMainView.layer;
    layer.masksToBounds = YES;
    _captureVideoPreviewLayer.frame = layer.bounds;
    _captureVideoPreviewLayer.videoGravity = AVLayerVideoGravityResizeAspectFill; // fill mode
    
    // Add the preview layer to the view, keeping the focus cursor (if any) on top
    [layer insertSublayer:_captureVideoPreviewLayer below:self.focusCursor.layer];
    
    // Set up the video data output (the real-time frame stream)
    [self addVideoDataOutput];
}
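
The session does not deliver any frames until startRunning is called. A common place to start and stop it is the view's appearance callbacks; the exact placement below is an assumption, since the original post does not show it.

// Minimal sketch: start and stop the session with the view lifecycle (assumed placement)
- (void)viewWillAppear:(BOOL)animated {
    [super viewWillAppear:animated];
    [self.captureSession startRunning];  // begins delivering frames to the outputs
}

- (void)viewWillDisappear:(BOOL)animated {
    [super viewWillDisappear:animated];
    [self.captureSession stopRunning];   // releases the camera when the view goes away
}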

(2) Real-time detection needs the live frame stream, so add an AVCaptureVideoDataOutput and adopt its sample buffer delegate protocol.

/**
 *  AVCaptureVideoDataOutput delivers the live frame stream
 */
- (void)addVideoDataOutput
{
    AVCaptureVideoDataOutput *captureOutput = [[AVCaptureVideoDataOutput alloc] init];
    captureOutput.alwaysDiscardsLateVideoFrames = YES; // drop frames that arrive while the delegate is still busy
    
    // Deliver sample buffers on a dedicated serial queue
    dispatch_queue_t queue = dispatch_queue_create("myQueue", DISPATCH_QUEUE_SERIAL);
    [captureOutput setSampleBufferDelegate:self queue:queue];
    
    // Request BGRA pixel buffers so they can be wrapped in a CIImage directly
    NSString *key = (NSString *)kCVPixelBufferPixelFormatTypeKey;
    NSNumber *value = @(kCVPixelFormatType_32BGRA);
    NSDictionary *settings = @{key: value};
    [captureOutput setVideoSettings:settings];
    
    if ([self.captureSession canAddOutput:captureOutput]) {
        [self.captureSession addOutput:captureOutput];
    }
}
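
For setSampleBufferDelegate:queue: to compile without warnings, the view controller has to declare conformance to AVCaptureVideoDataOutputSampleBufferDelegate. The class extension below is an assumed sketch (the class name ViewController and the property declarations are guesses matching the names used in the code above; the original post does not show this part).

// Assumed class extension: the controller adopts the sample buffer delegate protocol
@interface ViewController () <AVCaptureVideoDataOutputSampleBufferDelegate>

@property (nonatomic, strong) AVCaptureSession *captureSession;
@property (nonatomic, strong) AVCaptureDeviceInput *captureDeviceInput;
@property (nonatomic, strong) AVCaptureStillImageOutput *captureStillImageOutput;
@property (nonatomic, strong) AVCaptureVideoPreviewLayer *captureVideoPreviewLayer;

@end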

(3) Implement the sample buffer delegate

#pragma mark - Sample Buffer Delegate
// Called each time a new video frame (sample buffer) is written
- (void)captureOutput:(AVCaptureOutput *)captureOutput
didOutputSampleBuffer:(CMSampleBufferRef)sampleBuffer
       fromConnection:(AVCaptureConnection *)connection
{
    UIImage *img = [self imageFromSampleBuffer:sampleBuffer];
    UIImage *image = [[InfoTools shareInfoTools] fixOrientation:img];
    // Face detection; returns one NSValue-wrapped rect per detected face
    NSArray *features = [[InfoTools shareInfoTools] leftEyePositionsWithImage:image];
    
    // All UI updates go back to the main queue
    dispatch_async(dispatch_get_main_queue(), ^{
        // Keep enough FaceLabel subviews around to cover the detected faces
        // (the first subview of videoMainView is not a FaceLabel, hence the -1 / +1 offsets)
        if (self.videoMainView.subviews.count - 1 < features.count) {
            FaceLabel *label = [[FaceLabel alloc] init];
            label.hidden = YES;
            [self.videoMainView addSubview:label];
        }
        // Hide all labels first, then show the ones that match a face this frame
        for (UIView *label in self.videoMainView.subviews) {
            if ([label isMemberOfClass:[FaceLabel class]]) {
                label.hidden = YES;
            }
        }
        if (features.count > 0) {
            for (int i = 0; i < features.count; i++) {
                NSValue *layerRect = features[i];
                FaceLabel *label = (FaceLabel *)self.videoMainView.subviews[i + 1];
                CGRect originalRect = [layerRect CGRectValue];
                // Convert from image coordinates to the preview view's coordinates
                CGRect getRect = [self getUIImageViewRectFromCIImageRect:originalRect];
                label.frame = getRect;
                label.hidden = NO;
            }
        }
        else {
            for (UIView *label in self.videoMainView.subviews) {
                if ([label isMemberOfClass:[FaceLabel class]]) {
                    label.hidden = YES;
                }
            }
        }
    });
}
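
InfoTools and FaceLabel are the author's own helper classes and are not shown in the post. The sketch below is only a guess at what leftEyePositionsWithImage: might do, inferred from how its result is used above (a CIDetector run on the frame, returning one NSValue-boxed CGRect per face); the real implementation in the referenced demo may differ.

// Hypothetical sketch of the InfoTools helper used above (not from the original post)
- (NSArray<NSValue *> *)leftEyePositionsWithImage:(UIImage *)image
{
    CIImage *ciImage = [CIImage imageWithCGImage:image.CGImage];
    // Low accuracy keeps per-frame detection fast enough for a live stream
    CIDetector *detector = [CIDetector detectorOfType:CIDetectorTypeFace
                                              context:nil
                                              options:@{CIDetectorAccuracy: CIDetectorAccuracyLow}];
    NSMutableArray<NSValue *> *rects = [NSMutableArray array];
    for (CIFaceFeature *face in [detector featuresInImage:ciImage]) {
        // Box each face's bounds (still in Core Image coordinates) as an NSValue
        [rects addObject:[NSValue valueWithCGRect:face.bounds]];
    }
    return rects;
}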

(4) Create a UIImage from the sample buffer data

// In the delegate method, sampleBuffer is a Core Media object; Core Video is used to read its pixel data
// Create a UIImage from the sample buffer data
- (UIImage *)imageFromSampleBuffer:(CMSampleBufferRef)sampleBuffer
{
    CVImageBufferRef imageBuffer = CMSampleBufferGetImageBuffer(sampleBuffer);
    CIImage *ciImage = [CIImage imageWithCVPixelBuffer:imageBuffer];
    CIContext *temporaryContext = [CIContext contextWithOptions:nil];
    CGImageRef videoImage = [temporaryContext createCGImage:ciImage fromRect:CGRectMake(0, 0, CVPixelBufferGetWidth(imageBuffer), CVPixelBufferGetHeight(imageBuffer))];
    // LeftMirrored compensates for the front camera's orientation in portrait
    UIImage *result = [[UIImage alloc] initWithCGImage:videoImage scale:1.0 orientation:UIImageOrientationLeftMirrored];
    CGImageRelease(videoImage);
    return result;
}
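
Creating a new CIContext for every frame is relatively expensive. An optional optimization, not part of the original post, is to create the context once and reuse it across frames, for example:

// Optional optimization sketch: reuse a single CIContext instead of creating one per frame
- (CIContext *)sharedCIContext
{
    static CIContext *context = nil;
    static dispatch_once_t onceToken;
    dispatch_once(&onceToken, ^{
        context = [CIContext contextWithOptions:nil];
    });
    return context;
}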

See Demo 1 for real-time camera detection.
See Demo 2 for static image detection.
