<div class="photo">
<video ref="video" autoplay />
<canvas ref="canvas" />
</div>
Install face-api.js
npm install face-api.js
Download the models used for detection and comparison
Download link: the face-api.js model files
After downloading, place them in the public folder
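A typical layout of the public/models folder might look like the listing below, assuming you copy the SSD MobileNet V1, Tiny Face Detector, 68-point landmark and recognition weights from the face-api.js weights repository; the exact file names may vary with the version you download:

public/
  models/
    ssd_mobilenetv1_model-weights_manifest.json
    ssd_mobilenetv1_model-shard1
    ssd_mobilenetv1_model-shard2
    tiny_face_detector_model-weights_manifest.json
    tiny_face_detector_model-shard1
    face_landmark_68_model-weights_manifest.json
    face_landmark_68_model-shard1
    face_recognition_model-weights_manifest.json
    face_recognition_model-shard1
    face_recognition_model-shard2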
// import the methods we need from face-api.js
import {
detectAllFaces,
TinyFaceDetectorOptions,
bufferToImage,
detectSingleFace,
nets,
matchDimensions,
resizeResults,
draw,
SsdMobilenetv1Options,
Box,
} from 'face-api.js'
const options = new SsdMobilenetv1Options({
// minimum confidence threshold
// default: 0.5
minConfidence: 0.5,
})
const title = ref('人脸识别') // page title
const canvas = ref<HTMLCanvasElement | null>(null) // overlay canvas element
const ctx = document.createElement('canvas').getContext('2d') // off-screen drawing context
const video = ref<HTMLVideoElement | null>(null) // video element
const imgUrl = ref<string | null>(null) // captured photo URL
const stream = ref<MediaStream | null>(null) // current media stream
const getUserMediaFail = ref(false) // flag: getUserMedia failed
const boxObject = ref({ width: 100, height: 100 }) // initial box size
const viewFinderBox = ref({
topLeft: {
x: 0,
y: 0,
},
topRight: {
x: 0,
y: 0,
},
bottomLeft: {
x: 0,
y: 0,
},
bottomRight: {
x: 0,
y: 0,
},
}) // initialize viewFinderBox
/** Start the camera */
const getUserMedia = (
success: NavigatorUserMediaSuccessCallback,
error: NavigatorUserMediaErrorCallback
) => {
// prefer the front camera: { video: { facingMode: "user" } }
// force the rear camera: { video: { facingMode: { exact: "environment" } } }
const constraints = {
video: {
facingMode: 'user',
},
}
if (navigator.mediaDevices.getUserMedia) {
// current standard API
navigator.mediaDevices.getUserMedia(constraints).then(success).catch(error)
} else if (navigator.webkitGetUserMedia) {
// WebKit-based browsers
navigator.webkitGetUserMedia(constraints, success, error)
} else if (navigator.mozGetUserMedia) {
// Firefox
navigator.mozGetUserMedia(constraints, success, error)
} else if (navigator.getUserMedia) {
// legacy API
navigator.getUserMedia(constraints, success, error)
}
}
/** Capture the current video frame and return it as a JPEG blob */
const cameraShoot = (video: HTMLVideoElement) => {
const canvas = document.createElement('canvas')
canvas.width = video.videoWidth
canvas.height = video.videoHeight
canvas
.getContext('2d')
?.drawImage(
video,
(canvas.width - canvas.width * (320 / 240)) / 2,
0,
canvas.width * (320 / 240),
canvas.height
)
return new Promise<Blob | null>((resolve) =>
canvas.toBlob(resolve, 'image/jpeg'),
)
}
const drawBox = (box: Box, label: string) => {
if (!canvas.value) return
const context = canvas.value.getContext('2d')
context?.clearRect(box.x, box.y, box.width, box.height)
const drawBox = new draw.DrawBox(box, {
label: label,
})
drawBox.draw(canvas.value)
}
const handleStopVideo = () => {
if (stream.value) {
stream.value.getTracks().forEach((track) => {
track.stop()
})
}
}
const detectFace = async () => {
// important: wait for the next animation frame so the loop does not block the UI
await new Promise((resolve) => requestAnimationFrame(resolve))
if (
!canvas.value ||
!video.value ||
!video.value.currentTime ||
video.value.paused ||
video.value.ended
)
return detectFace()
// detect the face with the highest confidence score in the frame
const result = await detectSingleFace(video.value, options)
if (!result) return detectFace()
// match the canvas dimensions to the video
const dims = matchDimensions(canvas.value, video.value, true)
// resize the detected box in case the displayed size differs from the original frame
const resizedResult = resizeResults(result, dims)
const box = resizedResult.box
drawBox(box, '识别中')
video.value.pause()
// capture a snapshot of the face
const image = await cameraShoot(video.value)
if (!image) {
// the snapshot failed, restart detection
drawBox(box, '识别失败')
await delay(1000)
video.value.play()
return detectFace()
}
// convert the blob into a File so face-api.js can read it
const files = new window.File([image], '人脸头像.jpeg', {
type: 'image/jpeg'
})
// convert to an image element in order to extract face descriptors
const img = await bufferToImage(files)
// extract the face descriptors
const detections = await detectAllFaces(img, new TinyFaceDetectorOptions()).withFaceLandmarks().withFaceDescriptors()
if (detections.length > 0) {
if (ctx) ctx.drawImage(img, 0, 0, ctx.canvas.width, ctx.canvas.height)
for (const detection of detections) {
// the 128-dimension descriptor used for comparison
const data = detection.descriptor
// upload the descriptor; the backend compares it and returns the verification result
const detectResult = await uploadImg(data)
if (detectResult.msg === '验证成功') {
getResult({
type: 'success',
label: '通过',
id: infor.id,
time: time,
})
} else {
getResult({
type: 'danger',
label: '未通过',
id: infor.id,
time: "13:00:00",
})
}
handleStopVideo()
}
} else {
console.log('未检测到人脸')
video.value.play()
return detectFace()
}
}
}
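detectFace relies on a few helpers that are not part of face-api.js: delay, uploadImg, getResult, infor and time come from the surrounding component and backend. Below is a minimal sketch of what delay and uploadImg might look like, assuming a hypothetical /api/face/verify endpoint that accepts the 128-dimension descriptor and responds with { msg: ... }; adapt it to your own API contract.

// hypothetical helpers, adjust to your own backend contract
const delay = (ms: number) => new Promise((resolve) => setTimeout(resolve, ms))

const uploadImg = async (descriptor: Float32Array): Promise<{ msg: string }> => {
// the descriptor is a Float32Array of 128 numbers; serialize it as a plain array
const res = await fetch('/api/face/verify', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({ descriptor: Array.from(descriptor) }),
})
return res.json()
}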
onMounted(() => {
// get the user media stream
getUserMedia(
(streams) => {
// keep a reference so the stream can be stopped later
stream.value = streams
// show the video
if (video.value) {
video.value['srcObject'] = streams
}
},
(error) => (getUserMediaFail.value = true)
)
// load the models; the files live in the models folder under public
Promise.all([
nets.ssdMobilenetv1.loadFromUri('/models'),
nets.tinyFaceDetector.loadFromUri('/models'),
nets.faceLandmark68Net.loadFromUri('/models'),
nets.faceRecognitionNet.loadFromUri('/models')
]).then(() => detectFace())
})
.photo {
width: 200px;
height: 300px;
display: flex;
flex-direction: column;
align-items: center;
justify-content: center;
position: relative;
video {
width: 100%;
height: 100%;
// object-fit: fill;
transform: scaleX(-1);
}
img{
width: 100%;
}
canvas {
width: 100%;
height: 100%;
position: absolute;
top: 0px;
}
}
With the above in place, the page can detect a face and extract its descriptor data.
Adjust it to fit your own needs.
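If you would rather compare descriptors in the browser instead of uploading them, face-api.js also exposes euclideanDistance. A small sketch, where referenceDescriptor (a previously stored descriptor) and the 0.6 threshold are assumptions to tune for your own data:

import { euclideanDistance } from 'face-api.js'

// referenceDescriptor is a previously stored 128-dimension descriptor (hypothetical)
const isSamePerson = (descriptor: Float32Array, referenceDescriptor: Float32Array) => {
// a smaller distance means more similar faces; 0.6 is a commonly used cutoff
const distance = euclideanDistance(descriptor, referenceDescriptor)
return distance < 0.6
}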