First, take a look at a screenshot of my result.
Note: I access the camera to detect faces and then describe each face from the information the API returns. The small images on the right are cropped out of the video frame using the face position and size that the API reports.
First, we need to sign up for the Baidu AI face detection API and obtain an API key and secret; these are required for every later call.
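The key and secret are exchanged for an access_token, which the detect calls below use. A minimal sketch of how that exchange usually looks, assuming the standard Baidu AI OAuth endpoint (the function name getAccessToken is just for illustration; check the official docs for details):

// Sketch: exchange the API key and secret for an access_token (assumed Baidu OAuth endpoint).
import axios from "axios";

async function getAccessToken(apiKey, secretKey) {
  const response = await axios.post(
    "https://aip.baidubce.com/oauth/2.0/token",
    null,
    {
      params: {
        grant_type: "client_credentials",
        client_id: apiKey,        // the API key you applied for
        client_secret: secretKey  // the secret you applied for
      }
    }
  );
  // The token is typically valid for about 30 days, so cache it instead of
  // requesting a fresh one on every detect call.
  return response.data.access_token;
}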
Step 1: write the HTML
<template>
  <div class="main">
    <div class="cam">
      <div class="video-box">
        <div class="videos">
          <video
            id="video"
            style="width: 100%;height: 100%; object-fit: fill;"
            preload
            autoplay
            loop
            muted
          ></video>
        </div>
        <div class="button-box" @click="submit()">
          <img src="../assets/cam.png" />
        </div>
      </div>
      <div class="title-box">
        Recognition analysis
      </div>
      <div class="right-box">
        <div
          ref="mainscroll"
          class="face-box"
          v-loading="loading"
          element-loading-text="Loading..."
          element-loading-spinner="el-icon-loading"
          element-loading-background="transparent"
        >
          <div
            class="details"
            v-for="(item, index) in threeImageArray"
            :key="index"
          >
            <div class="image-box">
              <img :src="item.image" />
            </div>
            <div class="list-box">
              <div class="sex-one">Age: {{ item.age }}</div>
            </div>
          </div>
        </div>
      </div>
      <div class="canva-box">
        <canvas ref="canvas" id="canvas" width="1000" height="700"></canvas>
      </div>
    </div>
  </div>
</template>

Note: the requirement here was to show only the camera feed, so I placed the canvas that holds the captured frame underneath the video; adjust the CSS to suit your own needs.
Step 2: write the CSS
<style scoped>
.main {
  width: 100%;
  height: 100vh;
  display: flex;
  align-items: center;
  justify-content: center;
}
.cam {
  width: 50%;
  height: 90%;
  display: flex;
  flex-direction: column;
  align-items: center;
  background-color: #fff;
  border-radius: 20px;
}
.canva-box {
  width: 100%;
  display: flex;
  align-items: center;
  justify-content: center;
  position: absolute;
  top: 0;
  z-index: -99;
}
.button-box {
  position: absolute;
  top: 35%;
  border: 2px solid #fff;
  padding: calc(100vw * 20 / 1920);
  border-radius: 50%;
}
.button-box img {
  width: calc(100vw * 40 / 1920);
  height: calc(100vw * 40 / 1920);
}
.title-box {
  height: 5%;
  font-size: calc(100vw * 40 / 1920);
  font-weight: bold;
  width: 40%;
}
.right-box {
  width: 100%;
  height: 30%;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: space-evenly;
}
.video-box {
  width: 100%;
  height: 65%;
  display: flex;
  justify-content: center;
}
.videos {
  width: 100%;
  height: 100%;
}
.face-box {
  width: 100%;
  height: 100%;
  display: flex;
  flex-direction: column;
  align-items: center;
  overflow: auto;
}
.details {
  display: flex;
  width: 100%;
  height: 90%;
  margin-top: 4px;
}
.image-box {
  width: 30%;
  height: 100%;
  display: flex;
  justify-content: center;
}
.image-box img {
  width: calc(100vw * 150 / 1920);
  height: calc(100vw * 150 / 1920);
}
.list-box {
  width: 60%;
  height: calc(100vw * 300 / 1920);
  display: flex;
  flex-direction: column;
  text-indent: calc(100vw * 16 / 1920);
  font-size: calc(100vw * 18 / 1920);
  padding-bottom: 20px;
}
</style>
Step 3: install axios
npm i axios
Step 4: install element-ui
npm i element-ui
// import and register
import ElementUI from "element-ui";
import "element-ui/lib/theme-chalk/index.css";
// import echarts
import * as echarts from "echarts";
Vue.prototype.$echarts = echarts;
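These imports still have to be registered with Vue before the v-loading directive used in the template will work. A minimal main.js sketch under that assumption (the file layout follows a standard Vue 2 CLI project; echarts is kept only because the original imports it, it is not used elsewhere in this article):

// main.js — minimal sketch of wiring up the dependencies
import Vue from "vue";
import App from "./App.vue";
import ElementUI from "element-ui";
import "element-ui/lib/theme-chalk/index.css";
import * as echarts from "echarts";

Vue.use(ElementUI);               // registers Element UI components and the v-loading directive
Vue.prototype.$echarts = echarts; // optional: expose echarts on every component instance

new Vue({
  render: h => h(App)
}).$mount("#app");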
Step 5: the JS
First, access the camera (I also based this on something I found online, but I forget which article).
// Can be a click handler or called from mounted(); it depends on your needs
getCompetence() {
  var _this = this;
  this.thisCancas = document.getElementById("canvas");
  this.thisContext = this.thisCancas.getContext("2d");
  this.thisVideo = document.getElementById("video");
  // Older browsers may not support mediaDevices at all, so start with an empty object
  if (navigator.mediaDevices === undefined) {
    navigator.mediaDevices = {};
  }
  // Some browsers implement mediaDevices only partially, so we can't just assign an
  // object containing getUserMedia, because that would overwrite existing properties.
  // Here we add the getUserMedia property only if it is missing.
  if (navigator.mediaDevices.getUserMedia === undefined) {
    navigator.mediaDevices.getUserMedia = function(constraints) {
      // First grab the legacy getUserMedia, if it exists
      var getUserMedia =
        navigator.webkitGetUserMedia ||
        navigator.mozGetUserMedia ||
        navigator.getUserMedia;
      // Some browsers don't support it at all; return an error
      // so the interface stays consistent
      if (!getUserMedia) {
        return Promise.reject(
          new Error("getUserMedia is not implemented in this browser")
        );
      }
      // Otherwise wrap the call to the old navigator.getUserMedia in a Promise
      return new Promise(function(resolve, reject) {
        getUserMedia.call(navigator, constraints, resolve, reject);
      });
    };
  }
  var constraints = {
    audio: false,
    video: {
      width: this.videoWidth,
      height: this.videoHeight,
      transform: "scaleX(-1)"
    }
  };
  navigator.mediaDevices
    .getUserMedia(constraints)
    .then(function(stream) {
      // Older browsers may not have srcObject
      if ("srcObject" in _this.thisVideo) {
        _this.thisVideo.srcObject = stream;
      } else {
        // Avoid this branch in newer browsers, since it is being deprecated
        _this.thisVideo.src = window.URL.createObjectURL(stream);
      }
      _this.thisVideo.onloadedmetadata = function(e) {
        _this.thisVideo.play();
      };
    })
    .catch(err => {
      console.log(err);
    });
},
Remember to release the camera when the component is destroyed!

// Close the camera — put this in the component's destroy lifecycle hook
this.trackerTask.closeCamera();
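Note that this.trackerTask comes from the tracking.js library and is never created in this component, so the call above would fail as written. A hedged alternative that releases the getUserMedia stream directly, assuming the video element set up in getCompetence above (use beforeDestroy or destroyed as you prefer):

beforeDestroy() {
  // Stop every track of the stream attached to the <video> element so the
  // camera indicator turns off when the component is torn down.
  const stream = this.thisVideo && this.thisVideo.srcObject;
  if (stream) {
    stream.getTracks().forEach(track => track.stop());
    this.thisVideo.srcObject = null;
  }
}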
Next, let's write the part that calls the Baidu AI API.
async detectFace(imageData) {
  // The access_token is obtained with the key and secret you applied for;
  // the Baidu AI docs walk through how to get it
  this.loading = true;
  try {
    const response = await axios.post(
      "https://aip.baidubce.com/rest/2.0/face/v3/detect?access_token=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
      {
        image: imageData,
        image_type: "BASE64",
        face_field: "age,gender,emotion,glasses,mask,expression",
        max_face_num: 10
      }
    );
    // Handle the result returned by the API
    if (response.data.error_code == 0) {
      // Log the successful response
      console.log("call succeeded, result:", response);
      this.faceInfomation = response.data.result;
      this.faceArray = response.data.result.face_list;
      // Collect the returned face info in an array
      this.faceArray.forEach((item, i) => {
        console.log("item", item);
        // Position and size of the detected face
        const left = parseInt(item.location.left);
        const top = parseInt(item.location.top);
        const width = parseInt(item.location.width);
        const height = parseInt(item.location.height);
        const faceCanvas = document.createElement("canvas");
        faceCanvas.width = width;
        faceCanvas.height = height;
        // Draw the cropped face onto its own canvas so it can be displayed
        const faceCtx = faceCanvas.getContext("2d");
        faceCtx.drawImage(
          this.canvas,
          left,
          top,
          width,
          height,
          0,
          0,
          width,
          height
        );
        this.faceImg = faceCanvas.toDataURL("image/png");
        // Push the cropped avatar into a new array
        this.newImageArray.push(this.faceImg);
      });
      // The cropped face images and the face descriptions live in two different arrays,
      // so merge them into one; that makes the v-for list and its CSS much easier to handle
      this.threeImageArray = this.faceArray.map((item, index) => {
        return { ...item, image: this.newImageArray[index] };
      });
    }
  } catch (error) {
    console.error(error);
  } finally {
    this.loading = false;
  }
},

If anything is unclear, feel free to leave a comment and I'll reply, though I'm not sure my answer will be right, haha.
After the frame captured from the camera is drawn onto the canvas, convert it to the format the API accepts.
submit() {
  console.log("clicked this");
  // Clear out the previous results on every click
  this.newImageArray = [];
  this.faceArray = [];
  // Show the loading state
  this.fullscreenLoading = true;
  setTimeout(() => {
    this.fullscreenLoading = false;
  }, 2000);
  let that = this;
  let canvas = document.getElementById("canvas");
  let context = canvas.getContext("2d");
  let video = document.getElementById("video");
  // Draw the current camera frame onto the canvas
  context.drawImage(video, 0, 0, 1000, 700);
  // Convert the image to the format the API needs
  canvas.toBlob(blob => {
    var reader = new FileReader();
    reader.onloadend = () => {
      this.imageData = reader.result;
      this.newimage = this.imageData.replace(
        /^data:image\/\w+;base64,/,
        ""
      );
      // Keep a slice of the data URL prefix
      this.headerimg = this.imageData.slice(5, 15);
      this.imageArray.push(this.headerimg);
      // Call the detection method
      this.detectFace(this.newimage);
    };
    reader.readAsDataURL(blob);
  });
},
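The toBlob plus FileReader round-trip works, but since the API only needs a base64 string, the same result can be obtained in one step with canvas.toDataURL. A minimal sketch, assuming the same canvas, video, and detectFace used above:

// Sketch of a simpler capture path: toDataURL already returns a base64 data URL,
// so only the "data:image/png;base64," prefix has to be stripped before sending it.
const canvas = document.getElementById("canvas");
const video = document.getElementById("video");
canvas.getContext("2d").drawImage(video, 0, 0, 1000, 700);
const base64 = canvas.toDataURL("image/png").replace(/^data:image\/\w+;base64,/, "");
this.detectFace(base64);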
Full code
<template>
  <div class="main">
    <div class="cam">
      <div class="video-box">
        <div class="videos">
          <video
            id="video"
            style="width: 100%;height: 100%; object-fit: fill;"
            preload
            autoplay
            loop
            muted
          ></video>
        </div>
        <div class="button-box" @click="submit()">
          <img src="../assets/cam.png" />
        </div>
      </div>
      <div class="title-box">
        Recognition analysis
      </div>
      <div class="right-box">
        <div
          ref="mainscroll"
          class="face-box"
          v-loading="loading"
          element-loading-text="Loading..."
          element-loading-spinner="el-icon-loading"
          element-loading-background="transparent"
        >
          <div
            class="details"
            v-for="(item, index) in threeImageArray"
            :key="index"
          >
            <div class="image-box">
              <img :src="item.image" />
            </div>
            <div class="list-box">
              <div class="sex-one">Age: {{ item.age }}</div>
            </div>
          </div>
        </div>
      </div>
      <div class="canva-box">
        <canvas ref="canvas" id="canvas" width="1000" height="700"></canvas>
      </div>
    </div>
  </div>
</template>

<script>
import axios from "axios";
export default {
  name: "testTracking",
  data() {
    return {
      loading: false,
      fullscreenLoading: false, // flag toggled by submit(); not bound in this template
      contentAnwer: false,
      contentShow: true,
      API_KEY: "your API key here",
      SECRET_KEY: "your secret here",
      imageData: null,
      newimage: null,
      faceInfomation: {},
      faceArray: [],
      age: null,
      imageArray: [],
      headerimg: "",
      location: {},
      left: 0,
      top: 0,
      width: 0,
      height: 0,
      faceImg: null,
      canvas: null,
      newImageArray: [],
      threeImageArray: []
    };
  },
  methods: {
    submit() {
      console.log("clicked this");
      // Clear out the previous results on every click
      this.newImageArray = [];
      this.faceArray = [];
      // Show the loading state
      this.fullscreenLoading = true;
      setTimeout(() => {
        this.fullscreenLoading = false;
      }, 2000);
      let that = this;
      let canvas = document.getElementById("canvas");
      let context = canvas.getContext("2d");
      let video = document.getElementById("video");
      // Draw the current camera frame onto the canvas
      context.drawImage(video, 0, 0, 1000, 700);
      // Convert the image to the format the API needs
      canvas.toBlob(blob => {
        var reader = new FileReader();
        reader.onloadend = () => {
          this.imageData = reader.result;
          this.newimage = this.imageData.replace(
            /^data:image\/\w+;base64,/,
            ""
          );
          // Keep a slice of the data URL prefix
          this.headerimg = this.imageData.slice(5, 15);
          this.imageArray.push(this.headerimg);
          // Call the detection method
          this.detectFace(this.newimage);
        };
        reader.readAsDataURL(blob);
      });
    },
    // Can be a click handler or called from mounted(); it depends on your needs
    getCompetence() {
      var _this = this;
      this.thisCancas = document.getElementById("canvas");
      this.thisContext = this.thisCancas.getContext("2d");
      this.thisVideo = document.getElementById("video");
      // Older browsers may not support mediaDevices at all, so start with an empty object
      if (navigator.mediaDevices === undefined) {
        navigator.mediaDevices = {};
      }
      // Some browsers implement mediaDevices only partially, so we can't just assign an
      // object containing getUserMedia, because that would overwrite existing properties.
      // Here we add the getUserMedia property only if it is missing.
      if (navigator.mediaDevices.getUserMedia === undefined) {
        navigator.mediaDevices.getUserMedia = function(constraints) {
          // First grab the legacy getUserMedia, if it exists
          var getUserMedia =
            navigator.webkitGetUserMedia ||
            navigator.mozGetUserMedia ||
            navigator.getUserMedia;
          // Some browsers don't support it at all; return an error
          // so the interface stays consistent
          if (!getUserMedia) {
            return Promise.reject(
              new Error("getUserMedia is not implemented in this browser")
            );
          }
          // Otherwise wrap the call to the old navigator.getUserMedia in a Promise
          return new Promise(function(resolve, reject) {
            getUserMedia.call(navigator, constraints, resolve, reject);
          });
        };
      }
      var constraints = {
        audio: false,
        video: {
          width: this.videoWidth,
          height: this.videoHeight,
          transform: "scaleX(-1)"
        }
      };
      navigator.mediaDevices
        .getUserMedia(constraints)
        .then(function(stream) {
          // Older browsers may not have srcObject
          if ("srcObject" in _this.thisVideo) {
            _this.thisVideo.srcObject = stream;
          } else {
            // Avoid this branch in newer browsers, since it is being deprecated
            _this.thisVideo.src = window.URL.createObjectURL(stream);
          }
          _this.thisVideo.onloadedmetadata = function(e) {
            _this.thisVideo.play();
          };
        })
        .catch(err => {
          console.log(err);
        });
    },
    async detectFace(imageData) {
      // The access_token is obtained with the key and secret you applied for;
      // the Baidu AI docs walk through how to get it
      this.loading = true;
      try {
        const response = await axios.post(
          "https://aip.baidubce.com/rest/2.0/face/v3/detect?access_token=xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx",
          {
            image: imageData,
            image_type: "BASE64",
            face_field: "age,gender,emotion,glasses,mask,expression",
            max_face_num: 10
          }
        );
        // Handle the result returned by the API
        if (response.data.error_code == 0) {
          // Log the successful response
          console.log("call succeeded, result:", response);
          this.faceInfomation = response.data.result;
          this.faceArray = response.data.result.face_list;
          // Collect the returned face info in an array
          this.faceArray.forEach((item, i) => {
            console.log("item", item);
            // Position and size of the detected face
            const left = parseInt(item.location.left);
            const top = parseInt(item.location.top);
            const width = parseInt(item.location.width);
            const height = parseInt(item.location.height);
            const faceCanvas = document.createElement("canvas");
            faceCanvas.width = width;
            faceCanvas.height = height;
            // Draw the cropped face onto its own canvas so it can be displayed
            const faceCtx = faceCanvas.getContext("2d");
            faceCtx.drawImage(
              this.canvas,
              left,
              top,
              width,
              height,
              0,
              0,
              width,
              height
            );
            this.faceImg = faceCanvas.toDataURL("image/png");
            // Push the cropped avatar into a new array
            this.newImageArray.push(this.faceImg);
          });
          // The cropped face images and the face descriptions live in two different arrays,
          // so merge them into one; that makes the v-for list and its CSS much easier to handle
          this.threeImageArray = this.faceArray.map((item, index) => {
            return { ...item, image: this.newImageArray[index] };
          });
        }
      } catch (error) {
        console.error(error);
      } finally {
        this.loading = false;
      }
    }
  },
  mounted() {
    this.getCompetence();
    this.canvas = this.$refs.canvas;
  },
  computed: {},
  destroyed() {
    // Close the camera (trackerTask comes from tracking.js; see the note above for a
    // getUserMedia-only alternative)
    this.trackerTask.closeCamera();
  }
};
</script>

<style scoped>
.main {
  width: 100%;
  height: 100vh;
  display: flex;
  align-items: center;
  justify-content: center;
}
.cam {
  width: 50%;
  height: 90%;
  display: flex;
  flex-direction: column;
  align-items: center;
  background-color: #fff;
  border-radius: 20px;
}
.canva-box {
  width: 100%;
  display: flex;
  align-items: center;
  justify-content: center;
  position: absolute;
  top: 0;
  z-index: -99;
}
.button-box {
  position: absolute;
  top: 35%;
  border: 2px solid #fff;
  padding: calc(100vw * 20 / 1920);
  border-radius: 50%;
}
.button-box img {
  width: calc(100vw * 40 / 1920);
  height: calc(100vw * 40 / 1920);
}
.title-box {
  height: 5%;
  font-size: calc(100vw * 40 / 1920);
  font-weight: bold;
  width: 40%;
}
.right-box {
  width: 100%;
  height: 30%;
  display: flex;
  flex-direction: column;
  align-items: center;
  justify-content: space-evenly;
}
.video-box {
  width: 100%;
  height: 65%;
  display: flex;
  justify-content: center;
}
.videos {
  width: 100%;
  height: 100%;
}
.face-box {
  width: 100%;
  height: 100%;
  display: flex;
  flex-direction: column;
  align-items: center;
  overflow: auto;
}
.details {
  display: flex;
  width: 100%;
  height: 90%;
  margin-top: 4px;
}
.image-box {
  width: 30%;
  height: 100%;
  display: flex;
  justify-content: center;
}
.image-box img {
  width: calc(100vw * 150 / 1920);
  height: calc(100vw * 150 / 1920);
}
.list-box {
  width: 60%;
  height: calc(100vw * 300 / 1920);
  display: flex;
  flex-direction: column;
  text-indent: calc(100vw * 16 / 1920);
  font-size: calc(100vw * 18 / 1920);
  padding-bottom: 20px;
}
</style>