Approach 1: the system AVCaptureMetadataOutput (relatively inefficient)
Because the processing pipeline here is built on GPUImage, the AVCaptureMetadataOutput is attached to GPUImage's underlying capture session.
```swift
// Requires: import UIKit, AVFoundation, GPUImage
lazy var myVideo: GPUImageVideoCamera = {
    // Initialize the camera. The first parameter is the capture session preset
    // (resolution); the second selects the front or back camera.
    let myVideo: GPUImageVideoCamera = GPUImageVideoCamera(sessionPreset: AVCaptureSession.Preset.high.rawValue,
                                                           cameraPosition: .back)
    // Output in portrait orientation.
    myVideo.outputImageOrientation = .portrait
    // Whether the rear/front camera output is mirrored horizontally.
    myVideo.horizontallyMirrorRearFacingCamera = false
    myVideo.horizontallyMirrorFrontFacingCamera = true
    // Add audio input/output. When audio is allowed, this also avoids a
    // black/flashing first frame at the start of recording.
    myVideo.addAudioInputsAndOutputs()
    myVideo.delegate = self
    // Frame rate (optional):
    // myVideo.frameRate = 60

    // Face-detection setup: attach a metadata output to GPUImage's session.
    let metaDataOutput = AVCaptureMetadataOutput()
    if myVideo.captureSession.canAddOutput(metaDataOutput) {
        myVideo.captureSession.addOutput(metaDataOutput)
        // Only request face metadata if the device supports it.
        let supportTypes = metaDataOutput.availableMetadataObjectTypes
        if supportTypes.contains(AVMetadataObject.ObjectType.face) {
            metaDataOutput.metadataObjectTypes = [AVMetadataObject.ObjectType.face]
            metaDataOutput.setMetadataObjectsDelegate(self, queue: DispatchQueue.main)
        }
    }

    return myVideo
}()
```
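For completeness, here is a minimal usage sketch (not in the original post): render the camera into a GPUImageView and start capturing. The `previewView` name and frame are assumptions.

```swift
// Hypothetical usage: display the camera feed and start the capture session.
let previewView = GPUImageView(frame: UIScreen.main.bounds) // assumed preview view
view.addSubview(previewView)
myVideo.addTarget(previewView)   // GPUImageView conforms to GPUImageInput
myVideo.startCameraCapture()     // begin capturing
```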
Delegate method: the coordinates delivered by the AVCaptureMetadataOutputObjectsDelegate callback are normalized relative to the original capture frame, so you need to convert them to view coordinates yourself. I don't do that conversion here; the multiplication by the screen size below is only a rough approximation, and a sketch of a proper conversion follows the code.
```swift
// AVCaptureMetadataOutputObjectsDelegate
func metadataOutput(_ output: AVCaptureMetadataOutput, didOutput metadataObjects: [AVMetadataObject], from connection: AVCaptureConnection) {
    if !metadataObjects.isEmpty {
        DDLOG(message: "Face count: \(metadataObjects.count)")
        // Each detected face arrives as an AVMetadataFaceObject.
        guard let faceObjc = metadataObjects.first as? AVMetadataFaceObject else { return }

        let faceID = faceObjc.faceID
        // `bounds` is normalized (0...1) in the capture frame's coordinate space.
        let bounds = faceObjc.bounds
        let rollAngle = faceObjc.rollAngle // roll: in-plane tilt of the face
        let yawAngle = faceObjc.yawAngle   // yaw: rotation away from the camera; 0 when facing it

        DDLOG(message: faceObjc)

        // Rough conversion: scaling by the screen size only works when the
        // preview fills the screen and the axes already match.
        let x = DeviceMaxWidth * bounds.origin.x
        let y = DeviceMaxHeight * bounds.origin.y
        let w = DeviceMaxWidth * bounds.size.width
        let h = DeviceMaxHeight * bounds.size.height
    } else {
        DDLOG(message: "No face found")
    }
}
```
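A minimal sketch of such a conversion, assuming a portrait preview that fills `previewSize` with no letterboxing: `AVMetadataFaceObject.bounds` is normalized in the sensor's landscape coordinate space, so the axes are swapped relative to a portrait view. Treat this as an approximation rather than a mapping that covers every session configuration.

```swift
// Sketch: map normalized metadata bounds (sensor space, landscape) into a
// portrait preview of size `previewSize`. Assumes the preview fills its bounds.
func faceRect(from normalized: CGRect, in previewSize: CGSize) -> CGRect {
    // The sensor's x axis runs along the portrait view's vertical axis, so swap.
    return CGRect(x: normalized.origin.y * previewSize.width,
                  y: normalized.origin.x * previewSize.height,
                  width: normalized.size.height * previewSize.width,
                  height: normalized.size.width * previewSize.height)
}
```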
Approach 2: CIImage / CIDetector
Overview (adapted from material found online)
Composition of the CoreImage framework
Apple has already organized image processing into categories for us. Let's look at the framework's structure:
(Figure: Core Image framework structure)
It is divided into three main parts:
- Definitions: CoreImage and CoreImageDefines. As the names suggest, these represent the CoreImage framework itself and its definitions.
- Operations:
  - Filter (CIFilter): a CIFilter produces a CIImage. Typically it takes one or more images as input, applies some filter operations, and produces the specified output image.
  - Detector (CIDetector): a CIDetector detects features in an image, for example the eyes, mouth, and so on of faces in a picture.
  - Feature (CIFeature): a CIFeature represents a feature produced by a detector.
- Imaging:
  - Context (CIContext): the rendering context, which can work with Quartz 2D or OpenGL; it ties the CoreImage classes together for rendering work such as filters and colors. (A minimal filter-plus-context sketch follows this list.)
  - Color (CIColor): connects images with a context and handles image pixel colors.
  - Vector (CIVector): geometric handling such as image coordinate vectors.
  - Image (CIImage): represents an image, including the output image of a processing chain.
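To make the filter and context pieces concrete, here is a minimal sketch of the CIFilter → CIContext pipeline; the filter choice (CISepiaTone) and the intensity value are just examples, not from the original post:

```swift
import CoreImage
import UIKit

// Minimal CIFilter -> CIContext pipeline: filter a UIImage and render the result.
func sepia(_ image: UIImage) -> UIImage? {
    guard let input = CIImage(image: image),
          let filter = CIFilter(name: "CISepiaTone") else { return nil }
    filter.setValue(input, forKey: kCIInputImageKey)    // input image
    filter.setValue(0.8, forKey: kCIInputIntensityKey)  // example intensity
    guard let output = filter.outputImage else { return nil }
    let context = CIContext() // renders on the GPU where available
    guard let cg = context.createCGImage(output, from: output.extent) else { return nil }
    return UIImage(cgImage: cg)
}
```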
With these basics covered, let's walk through a sample project to verify Core Image's face-detection capability step by step.
The face-detection flow is: obtain an image, convert it to CIImage format, run a CIDetector over it to get all the faces (returned as CIFeature objects), and then draw a box around each one. The key code is as follows:
```swift
override func viewDidLoad() {
    super.viewDidLoad()

    // Load the source image (the asset name here is just an example).
    guard let image = UIImage(named: "face") else { return }

    let imgView = UIImageView(frame: CGRect(x: 0, y: 0, width: DeviceMaxWidth, height: DeviceMaxHeight))
    imgView.image = image
    imgView.contentMode = .scaleAspectFit
    self.view.addSubview(imgView)

    guard let ciImage = CIImage(image: image) else { return }
    let faceDetector: CIDetector = CIDetector(ofType: CIDetectorTypeFace,
                                              context: nil,
                                              options: [CIDetectorAccuracy: CIDetectorAccuracyLow,
                                                        CIDetectorTracking: true])!
    // CIDetectorSmile / CIDetectorEyeBlink must be requested here, otherwise
    // hasSmile and the eye-closed flags checked below are never populated.
    let featureArray: [CIFeature] = faceDetector.features(in: ciImage,
                                                          options: [CIDetectorSmile: true,
                                                                    CIDetectorEyeBlink: true])
    DDLOG(message: "Number of faces detected: \(featureArray.count)")
    /*
     Detection runs on the original image, which has a higher resolution than the
     image view. Since the image view's content mode is aspect fit (scale while
     preserving the aspect ratio), we must compute the actual position and size
     of each face within the image view before drawing the rectangles.
     Also note that CoreImage and UIKit use different coordinate systems
     (CoreImage's origin is at the bottom-left, UIKit's at the top-left), so a
     CoreImage-to-UIView coordinate conversion is required.
     */
    // Convert coordinate systems: flip the Y axis.
    let ciImageSize = ciImage.extent.size
    var transform = CGAffineTransform(scaleX: 1, y: -1)
    transform = transform.translatedBy(x: 0, y: -ciImageSize.height)

    // Aspect-fit scale factor and the fitted image's offsets inside the view.
    let viewSize = imgView.bounds.size
    let scale = min(viewSize.width / ciImageSize.width,
                    viewSize.height / ciImageSize.height)
    let offsetX = (viewSize.width - ciImageSize.width * scale) / 2
    let offsetY = (viewSize.height - ciImageSize.height * scale) / 2

    for case let face as CIFaceFeature in featureArray {

        // Apply the flip transform, then scale and offset into view coordinates.
        var faceViewBounds = face.bounds.applying(transform)
        faceViewBounds = faceViewBounds.applying(CGAffineTransform(scaleX: scale, y: scale))
        faceViewBounds.origin.x += offsetX
        faceViewBounds.origin.y += offsetY

        let faceView = UIView(frame: faceViewBounds)
        faceView.layer.borderWidth = 1
        faceView.layer.borderColor = UIColor.red.cgColor
        faceView.backgroundColor = UIColor.clear
        imgView.addSubview(faceView)

        DDLOG(message: "image view bounds: \(imgView.bounds)")
        DDLOG(message: "CIImage extent size: \(ciImageSize)")
        DDLOG(message: "Face bounds in the original image: \(face.bounds)")
        DDLOG(message: "Face bounds after conversion: \(faceViewBounds)")

        if face.hasLeftEyePosition {
            print("Left eye position \(face.leftEyePosition)")
            // Flip, scale, and offset, same as for the face rectangle.
            let leftEyePoint = face.leftEyePosition.applying(transform)
            var leftEyeScaledPoint = leftEyePoint.applying(CGAffineTransform(scaleX: scale, y: scale))
            leftEyeScaledPoint.x += offsetX
            leftEyeScaledPoint.y += offsetY

            // Center a 20x20 circular marker on the eye position.
            let leftEyeView = UIView(frame: CGRect(x: leftEyeScaledPoint.x - 10,
                                                   y: leftEyeScaledPoint.y - 10,
                                                   width: 20, height: 20))
            leftEyeView.layer.cornerRadius = leftEyeView.bounds.width / 2
            leftEyeView.layer.borderWidth = 1
            leftEyeView.layer.borderColor = UIColor.red.cgColor
            leftEyeView.backgroundColor = UIColor.clear
            imgView.addSubview(leftEyeView)
        }

        if face.hasRightEyePosition {
            print("Right eye position \(face.rightEyePosition)")
            let rightEyePoint = face.rightEyePosition.applying(transform)
            var rightEyeScaledPoint = rightEyePoint.applying(CGAffineTransform(scaleX: scale, y: scale))
            rightEyeScaledPoint.x += offsetX
            rightEyeScaledPoint.y += offsetY

            let rightEyeView = UIView(frame: CGRect(x: rightEyeScaledPoint.x - 10,
                                                    y: rightEyeScaledPoint.y - 10,
                                                    width: 20, height: 20))
            rightEyeView.layer.cornerRadius = rightEyeView.bounds.width / 2
            rightEyeView.layer.borderWidth = 1
            rightEyeView.layer.borderColor = UIColor.red.cgColor
            rightEyeView.backgroundColor = UIColor.clear
            imgView.addSubview(rightEyeView)
        }

        if face.leftEyeClosed {
            print("Left eye is closed")
        }

        if face.rightEyeClosed {
            print("Right eye is closed")
        }

        if face.hasMouthPosition {
            print("Mouth position \(face.mouthPosition)")
            let mouthPoint = face.mouthPosition.applying(transform)
            var mouthScaledPoint = mouthPoint.applying(CGAffineTransform(scaleX: scale, y: scale))
            mouthScaledPoint.x += offsetX
            mouthScaledPoint.y += offsetY

            // Center a 30x30 marker on the mouth position.
            let mouthView = UIView(frame: CGRect(x: mouthScaledPoint.x - 15,
                                                 y: mouthScaledPoint.y - 15,
                                                 width: 30, height: 30))
            mouthView.layer.borderWidth = 1
            mouthView.layer.borderColor = UIColor.red.cgColor
            mouthView.backgroundColor = UIColor.clear
            imgView.addSubview(mouthView)
        }

        if face.hasSmile {
            print("Smiling")
        }

        if face.hasFaceAngle {
            print("Face angle \(face.faceAngle)")
        }
        if face.hasTrackingID {
            print("Tracking ID \(face.trackingID)")
        }
        if face.hasTrackingFrameCount {
            // Number of frames this face has been tracked for.
            print("Tracking frame count \(face.trackingFrameCount)")
        }
    }

    let backBtn = UIButton(frame: CGRect(x: 15, y: 20, width: 30, height: 30))
    backBtn.setTitleColor(UIColor.red, for: .normal)
    backBtn.titleLabel?.font = UIFont.systemFont(ofSize: 14)
    backBtn.setTitle("Back", for: .normal)
    backBtn.addTarget(self, action: #selector(btnEvent1), for: .touchUpInside)
    backBtn.tag = 0
    self.view.addSubview(backBtn)
}
```
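One design note: the flip → scale → offset conversion above is repeated three times (both eyes and the mouth). A small helper, not in the original post, could factor it out; `flip`, `scale`, and `offset` are the values computed in viewDidLoad:

```swift
// Hypothetical helper: convert a CoreImage point into image-view coordinates.
func convertToViewPoint(_ point: CGPoint,
                        flip: CGAffineTransform,
                        scale: CGFloat,
                        offset: CGPoint) -> CGPoint {
    var p = point.applying(flip)                                // flip the Y axis
    p = p.applying(CGAffineTransform(scaleX: scale, y: scale))  // aspect-fit scale
    return CGPoint(x: p.x + offset.x, y: p.y + offset.y)        // position in the view
}
```

Each marker position then becomes, for example, `convertToViewPoint(face.mouthPosition, flip: transform, scale: scale, offset: CGPoint(x: offsetX, y: offsetY))`.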