iOS 杂谈iOS开发首页投稿(暂停使用,暂停投稿)

iOS tutorial 2:用Core Image进行面部识别

2017-07-14  本文已影响273人  Andy_Ron

参考:Face Detection in iOS Using Core Image

面部识别API不仅可以识别面部,也可以识别面部的特殊细节,例如微笑甚至眨眼睛。

建立初始项目

原文建好了初始项目,我自己新建了初始项目

识别照片的面部

/// Detects faces in `personPic`'s image and draws a red box around each one.
///
/// NOTE: The boxes drawn here are deliberately misplaced — `CIFaceFeature.bounds`
/// is expressed in Core Image coordinates (origin at the bottom-left), while
/// UIKit views use a top-left origin. The corrected version below converts
/// the coordinates before drawing.
func detect() {
    // 1 — bail out safely instead of force-unwrapping `personPic.image!`,
    // which would crash when no image is set.
    guard let image = personPic.image,
          let personciImage = CIImage(image: image) else {
        return
    }
    // 2 — high-accuracy face detector; `CIDetector(ofType:...)` is failable,
    // and the conditional array downcast replaces the crashing `as!`.
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    guard let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy),
          let faces = faceDetector.features(in: personciImage) as? [CIFaceFeature] else {
        return
    }

    // 3 — outline each detected face (still in the wrong coordinate space,
    // see the note above).
    for face in faces {

        print("Found bounds are \(face.bounds)")

        let faceBox = UIView(frame: face.bounds)

        faceBox.layer.borderWidth = 3
        faceBox.layer.borderColor = UIColor.red.cgColor
        faceBox.backgroundColor = UIColor.clear
        personPic.addSubview(faceBox)
        // 4 — the position properties are only meaningful when the
        // corresponding `has…` flag is true.
        if face.hasLeftEyePosition {
            print("Left eye bounds are \(face.leftEyePosition)")
        }

        if face.hasRightEyePosition {
            print("Right eye bounds are \(face.rightEyePosition)")
        }
    }
}

打印结果显示,检测到的面部位置是不对的:
Found bounds are (177.0, 416.0, 380.0, 380.0)
这是因为UIKit的坐标系统与Core Image的坐标系统是不同的:UIKit以左上角为原点,而Core Image以左下角为原点,所以需要先做一次坐标变换:
/// Detects faces in `personPic`'s image and draws a red box around each one,
/// converting face bounds from Core Image coordinates (bottom-left origin)
/// to UIKit coordinates (top-left origin) and accounting for the image view's
/// aspect-fit scaling.
func detect() {

    // Bail out safely instead of force-unwrapping `personPic.image!`.
    guard let image = personPic.image,
          let personciImage = CIImage(image: image) else {
        return
    }

    // High-accuracy face detector; guard replaces the crashing `as!` cast
    // on the optional detector's result.
    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    guard let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy),
          let faces = faceDetector.features(in: personciImage) as? [CIFaceFeature] else {
        return
    }

    // Flip the y-axis: Core Image's origin is bottom-left, UIKit's is top-left.
    let ciImageSize = personciImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

    // NOTE(review): repeated calls keep stacking face boxes on `personPic`;
    // consider removing previously added boxes first.
    for face in faces {

        print("Found bounds are \(face.bounds)")

        // Apply the transform to convert the coordinates
        var faceViewBounds = face.bounds.applying(transform)

        // Calculate the actual position and size of the rectangle in the
        // image view: aspect-fit uses the smaller scale factor, and the
        // letterbox offsets center the image inside the view.
        let viewSize = personPic.bounds.size
        let scale = min(viewSize.width / ciImageSize.width,
                        viewSize.height / ciImageSize.height)
        let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
        let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

        faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
        faceViewBounds.origin.x += offsetX
        faceViewBounds.origin.y += offsetY

        let faceBox = UIView(frame: faceViewBounds)

        faceBox.layer.borderWidth = 3
        faceBox.layer.borderColor = UIColor.red.cgColor
        faceBox.backgroundColor = UIColor.clear
        personPic.addSubview(faceBox)

        // Eye positions are only valid when the corresponding flag is set.
        if face.hasLeftEyePosition {
            print("Left eye bounds are \(face.leftEyePosition)")
        }

        if face.hasRightEyePosition {
            print("Right eye bounds are \(face.rightEyePosition)")
        }
    }
}

运行可看到正确识别位置:


相机拍照后的脸部识别

之前是项目中照片识别,现在是拍完照再识别,原理是相同的,就是多一个拍完照,取照片的过程。

// 1
/// View controller that lets the user take a photo with the camera and then
/// runs face detection on the result.
class CameraViewController: UIViewController, UIImagePickerControllerDelegate, UINavigationControllerDelegate {

    @IBOutlet var imageView: UIImageView!

    // Single picker instance reused for every capture.
    let imagePicker = UIImagePickerController()

    override func viewDidLoad() {
        super.viewDidLoad()
        // Receive the delegate callbacks below once a photo is taken.
        imagePicker.delegate = self
    }

    /// Presents the camera UI; does nothing on devices without a camera
    /// (e.g. the simulator).
    @IBAction func takePhoto(_ sender: AnyObject) {
        guard UIImagePickerController.isSourceTypeAvailable(.camera) else {
            return
        }
        imagePicker.allowsEditing = false
        imagePicker.sourceType = .camera
        present(imagePicker, animated: true, completion: nil)
    }

    //MARK: -UIImagePickerControllerDelegate

    /// Shows the captured photo in `imageView`, dismisses the picker, and
    /// kicks off face detection.
    func imagePickerController(_ picker: UIImagePickerController, didFinishPickingMediaWithInfo info: [String : Any]) {
        if let pickedImage = info[UIImagePickerControllerOriginalImage] as? UIImage {
            imageView.contentMode = .scaleAspectFit
            imageView.image = pickedImage
        }
        dismiss(animated: true, completion: nil)
        detect()
    }

    /// User cancelled — just close the picker.
    func imagePickerControllerDidCancel(_ picker: UIImagePickerController) {
        dismiss(animated: true, completion: nil)
    }
}
/// Detects the first face in `imageView.image` and reports facial details
/// (smile, mouth, eye positions) in an alert.
func detect() {
    // Bail out safely instead of force-unwrapping: `image` may be nil, and a
    // UIImage that is not CGImage-backed (e.g. built from a CIImage) has a
    // nil `cgImage` — the original `imageView.image!.cgImage!` crashed there.
    guard let image = imageView.image, let cgImage = image.cgImage else {
        return
    }
    let personciImage = CIImage(cgImage: cgImage)

    // Orientation value 5 matches the camera's portrait capture here.
    // NOTE(review): a hard-coded orientation only works for that one case —
    // consider deriving it from `image.imageOrientation` instead.
    // Native Swift dictionary replaces the original NSDictionary bridging.
    let imageOptions: [String: Any] = [CIDetectorImageOrientation: 5]

    let accuracy = [CIDetectorAccuracy: CIDetectorAccuracyHigh]
    let faceDetector = CIDetector(ofType: CIDetectorTypeFace, context: nil, options: accuracy)
    let faces = faceDetector?.features(in: personciImage, options: imageOptions)

    if let face = faces?.first as? CIFaceFeature {
        print("found bounds are \(face.bounds)")

        var message = "有个脸"

        if face.hasSmile {
            print("脸是笑的")
            message += ",脸是笑的"
        }
        if face.hasMouthPosition {
            print("有嘴唇")
            message += ",有嘴唇"
        }

        // Typo fix: "眼睛" (eye), not "眼镜" (glasses), in the user-facing text.
        if face.hasLeftEyePosition {
            print("左眼睛的位置是 \(face.leftEyePosition)")
            message += ",左眼睛的位置是 \(face.leftEyePosition)"
        }

        if face.hasRightEyePosition {
            print("右眼睛的位置是 \(face.rightEyePosition)")
            message += ",右眼睛的位置是 \(face.rightEyePosition)"
        }

        let alert = UIAlertController(title: "嘿嘿", message: message, preferredStyle: .alert)
        alert.addAction(UIAlertAction(title: "OK", style: .default, handler: nil))
        self.present(alert, animated: true, completion: nil)

    } else {
        let alert = UIAlertController(title: "没脸了", message: "没有检测到脸", preferredStyle: .alert)
        alert.addAction(UIAlertAction(title: "OK", style: .default, handler: nil))
        self.present(alert, animated: true, completion: nil)
    }
}

运行就可以识别照片的面部具体细节
CIFaceFeature还提供了其他很多面部细节:

        open var hasLeftEyePosition: Bool { get }

        open var leftEyePosition: CGPoint { get }

        open var hasRightEyePosition: Bool { get }

        open var rightEyePosition: CGPoint { get }

        open var hasMouthPosition: Bool { get }

        open var mouthPosition: CGPoint { get }

        open var hasTrackingID: Bool { get }

        open var trackingID: Int32 { get }

        open var hasTrackingFrameCount: Bool { get }

        open var trackingFrameCount: Int32 { get }

        open var hasFaceAngle: Bool { get }

        open var faceAngle: Float { get }

        open var hasSmile: Bool { get }

        open var leftEyeClosed: Bool { get }

        open var rightEyeClosed: Bool { get }     

代码

Detector

上一篇 下一篇

猜你喜欢

热点阅读