
Using TensorFlow in a Vue Project

A face recognition example

1. Third-party dependencies

package.json

{
  "peerDependencies": {
    "@tensorflow/tfjs-backend-cpu": "3.8.0",
    "@tensorflow/tfjs-backend-webgl": "3.8.0",
    "@tensorflow/tfjs-backend-wasm": "3.8.0",
    "@tensorflow/tfjs-converter": "3.8.0",
    "@tensorflow/tfjs-core": "3.8.0"
  }
}
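
The backend packages are side-effect imports: @tensorflow/tfjs-core only provides the kernel-agnostic API, so each backend you plan to use must be registered before calling tf.setBackend. A minimal sketch of the setup (the wasm path is an assumption; point it at wherever your bundler serves the .wasm binaries):

import * as tf from '@tensorflow/tfjs-core';
// Registering a backend is a side effect of importing its package.
import '@tensorflow/tfjs-backend-webgl';
import '@tensorflow/tfjs-backend-cpu';
import {setWasmPaths} from '@tensorflow/tfjs-backend-wasm';

async function initTf() {
  // Only needed if you use the wasm backend; the path is an assumption,
  // adjust it to where your bundler serves the tfjs wasm binaries.
  setWasmPaths('/static/tfjs-wasm/');
  await tf.setBackend('webgl'); // try 'wasm' or 'cpu' if WebGL is unavailable
  await tf.ready();
}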

2. The lib folder

anchors.js

const arr = [
  ...
  [0.9375, 0.9375, 1.0, 1.0]
];

export default function get_blazeface_priors() {
  return arr;
}
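
The arr literal (truncated above) holds the 896 BlazeFace anchor priors as normalized [x_center, y_center, w, h] rows. If you prefer not to ship the table, an equivalent grid can be generated at runtime. The sketch below assumes the standard BlazeFace layout for a 128×128 input: 2 anchors per cell on a 16×16 grid (stride 8) and 6 per cell on an 8×8 grid (stride 16), all with unit width and height; verify the counts against your own model before swapping it in. Note that the last generated row, (7.5/8, 7.5/8, 1, 1) = [0.9375, 0.9375, 1.0, 1.0], matches the last row of the table above.

export function generateBlazefacePriors(inputSize = 128) {
  const specs = [
    {stride: 8, anchorsPerCell: 2},  // 16x16 cells -> 512 anchors
    {stride: 16, anchorsPerCell: 6}, // 8x8 cells   -> 384 anchors
  ];
  const priors = [];
  for (const {stride, anchorsPerCell} of specs) {
    const gridSize = inputSize / stride;
    for (let y = 0; y < gridSize; y++) {
      for (let x = 0; x < gridSize; x++) {
        const cx = (x + 0.5) / gridSize; // normalized cell center
        const cy = (y + 0.5) / gridSize;
        for (let a = 0; a < anchorsPerCell; a++) {
          priors.push([cx, cy, 1.0, 1.0]); // [x_center, y_center, w, h]
        }
      }
    }
  }
  return priors; // 512 + 384 = 896 rows
}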

FaceDetectionModel.js

import * as tf from '@tensorflow/tfjs-core'
import {load_model} from "./ModelUtils";
import get_blazeface_priors from "./anchors";

const DETECTION_IMAGE_SIZE = 128; // input tensor size of the face detector: [128, 128]

export class FaceDetectionModel {
  constructor({
                modelPath,
                imageSize,
                faceConfThresh,
                nmsThresh
              }) {
    this.modelPath = modelPath || null;
    this.faceConfThresh = faceConfThresh || 0.6;
    this.nmsThresh = nmsThresh || 0.4;
    this.imageSize = imageSize || [640, 480];
  }

  destroy() {
    tf.dispose();
    tf.disposeVariables();
    if (this.model) {
      this.model = null;
    }
  }

  async loadModel() {
    return this.model = await load_model(this.modelPath);
  }

  /**
   * Face detection; this is the method external callers use.
   * @param inputTensor
   * @returns {Promise<Array>} [faceBox, keyPoints]
   */
  async executeFaceDetection(inputTensor) {
    let outputFaceBox = null;
    const detectionResult = this.predict(inputTensor, DETECTION_IMAGE_SIZE);
    const confTensor = detectionResult[0];
    const locTensor = detectionResult[1];
    // decode boxes and run NMS
    const decodeResult = this.decodeBox(locTensor, get_blazeface_priors());
    const decodeLocTensor = decodeResult[0];
    const decodeKeyPointTensor = decodeResult[1];
    const keepIdxsTensor = await this.nms(decodeLocTensor,
      confTensor,
      this.faceConfThresh,
      this.nmsThresh);
    const faceBoxesTensor = tf.gather(decodeLocTensor, keepIdxsTensor);
    const faceKeyPointTensor = tf.gather(decodeKeyPointTensor, keepIdxsTensor);
    const faceBoxes = faceBoxesTensor.arraySync();
    const keyPointList = faceKeyPointTensor.arraySync();
    // tensors that were not created inside tf.tidy() must be disposed manually
    confTensor.dispose();
    locTensor.dispose();
    decodeLocTensor.dispose();
    faceBoxesTensor.dispose();
    decodeKeyPointTensor.dispose();
    faceKeyPointTensor.dispose();
    keepIdxsTensor.dispose();
    let x_1, x_2, y_1, y_2;
    let xMax, yMax;
    let outputKeyPoint = [];
    [xMax, yMax] = this.imageSize;
    if (faceBoxes.length > 0) {
      // rescale from normalized tensor coordinates back to the original image size
      x_1 = faceBoxes[0][0] * xMax;
      y_1 = faceBoxes[0][1] * yMax;
      x_2 = faceBoxes[0][2] * xMax;
      y_2 = faceBoxes[0][3] * yMax;
      // expand the box outward by a margin
      const w = x_2 - x_1;
      const h = y_2 - y_1;
      x_1 = Math.round(Math.max(x_1 - w / 10, 0));
      x_2 = Math.round(Math.min(x_2 + w / 10, xMax));
      y_1 = Math.round(Math.max(y_1 - h / 5, 0));
      y_2 = Math.round(Math.min(y_2 + h / 20, yMax));
      outputFaceBox = [x_1, y_1, x_2, y_2];
      for (let i = 0; i < keyPointList[0].length; i += 2) {
        const point = [keyPointList[0][i] * xMax, keyPointList[0][i + 1] * yMax];
        outputKeyPoint.push(point);
      }
    }
    return [outputFaceBox, outputKeyPoint];
  }

  /**
   * Predict face boxes.
   * @param imageTensor
   * @param scaledSize
   * @returns {[T, Tensor<Rank>]}
   */
  predict(imageTensor, scaledSize) {
    return tf.tidy(() => {
      const resizedImg = tf.image.resizeBilinear(imageTensor, [scaledSize, scaledSize]);
      // preprocessing: normalize to [0, 1] and add a batch dimension
      const inputTensor = tf.expandDims(tf.div(resizedImg, 255.0));
      // run the model
      const preds = this.model.predict(inputTensor);
      const confTensor = tf.sigmoid(tf.squeeze(tf.concat([preds[1], preds[3]], 1)));
      const locTensor = tf.squeeze(tf.concat([preds[2], preds[0]], 1));
      return [confTensor, locTensor];
    });
  }

  /**
   * Map raw box offsets back to image-space coordinates (computed on the CPU).
   * @param loc_tensor
   * @param anchors
   * @returns {*}
   */
  decodeBox(loc_tensor, anchors) {
    return tf.tidy(() => {
      // note: loc_tensor itself must not be released by tf here
      const reshape_loc_tensor = tf.reshape(loc_tensor, [-1]);
      const loc = reshape_loc_tensor.dataSync();
      const num_anchor = anchors.length;
      const loc_length = 16;
      for (let anchor_i = 0; anchor_i < num_anchor; anchor_i++) {
        // decode boxes: raw offsets -> xywh in anchor space
        loc[anchor_i * loc_length + 0] = loc[anchor_i * loc_length + 0] / 128.0 * anchors[anchor_i][2] + anchors[anchor_i][0];
        loc[anchor_i * loc_length + 1] = loc[anchor_i * loc_length + 1] / 128.0 * anchors[anchor_i][3] + anchors[anchor_i][1];
        loc[anchor_i * loc_length + 2] = loc[anchor_i * loc_length + 2] / 128.0 * anchors[anchor_i][2];
        loc[anchor_i * loc_length + 3] = loc[anchor_i * loc_length + 3] / 128.0 * anchors[anchor_i][3];
        // xywh -> xyxy
        loc[anchor_i * loc_length + 0] = loc[anchor_i * loc_length + 0] - 0.5 * loc[anchor_i * loc_length + 2];
        loc[anchor_i * loc_length + 1] = loc[anchor_i * loc_length + 1] - 0.5 * loc[anchor_i * loc_length + 3];
        loc[anchor_i * loc_length + 2] = loc[anchor_i * loc_length + 0] + loc[anchor_i * loc_length + 2];
        loc[anchor_i * loc_length + 3] = loc[anchor_i * loc_length + 1] + loc[anchor_i * loc_length + 3];
        // decode keypoints (6 x/y pairs after the 4 box values)
        for (let k = 4; k < loc_length; k++) {
          if (k % 2 == 0) {
            loc[anchor_i * loc_length + k] = loc[anchor_i * loc_length + k] / 128.0 * anchors[anchor_i][2] + anchors[anchor_i][0];
          } else {
            loc[anchor_i * loc_length + k] = loc[anchor_i * loc_length + k] / 128.0 * anchors[anchor_i][3] + anchors[anchor_i][1];
          }
        }
      }
      const decode_loc_tensor = tf.tensor(loc, [num_anchor, loc_length], "float32");
      const [boxes_tensor, keypoints_tensor] = tf.split(decode_loc_tensor, [4, 12], 1);
      return [boxes_tensor, keypoints_tensor];
    });
  }

  /**
   * NMS
   * @param _boxes
   * @param _conf
   * @param conf_thresh
   * @param nms_thresh
   * @returns {Promise<Tensor<Rank>>}
   */
  async nms(_boxes, _conf, conf_thresh, nms_thresh) {
    const tf_boxes_tensor = tf.tidy(() => {
      // nonMaxSuppression expects [y1, x1, y2, x2] ordering
      const [x1, y1, x2, y2] = tf.split(_boxes, 4, 1);
      return tf.concat([y1, x1, y2, x2], 1);
    });
    let select_idx = await tf.image.nonMaxSuppressionAsync(tf_boxes_tensor, _conf, 10, nms_thresh, conf_thresh);
    tf_boxes_tensor.dispose();
    return select_idx;
  }
}

export default FaceDetectionModel;
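
A minimal standalone usage sketch of the class above. The model URL is a placeholder; executeFaceDetection expects an [H, W, 3] uint8 image tensor whose dimensions match imageSize:

import * as tf from '@tensorflow/tfjs-core';
import '@tensorflow/tfjs-backend-webgl';
import FaceDetectionModel from './FaceDetectionModel';

async function detectOnce(videoElement) {
  await tf.setBackend('webgl');
  const detector = new FaceDetectionModel({
    modelPath: 'https://example.com/models/FaceDetectionModel.json', // placeholder URL
    imageSize: [videoElement.videoWidth, videoElement.videoHeight],
    faceConfThresh: 0.6,
    nmsThresh: 0.4,
  });
  await detector.loadModel();
  const frame = tf.browser.fromPixels(videoElement); // [H, W, 3]
  const [faceBox, keyPoints] = await detector.executeFaceDetection(frame);
  frame.dispose();
  console.log('box:', faceBox, 'keypoints:', keyPoints);
  detector.destroy();
}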

FaceEngine.js

import * as tf from '@tensorflow/tfjs-core'
import FaceMeshModel from "./FaceMeshModel";
import FaceDetectionModel from "./FaceDetectionModel";
import {newFrameTensor, checkFace} from "./ModelUtils";

export class FaceEngine {
  constructor({
                detectionModelPath,
                meshModelPath,
                imageSize,
                faceConfThresh,
                nmsThresh,
                landmarkThresh,
                backend
              }) {
    this.faceDetectionModel = new FaceDetectionModel({
      modelPath: detectionModelPath,
      imageSize: imageSize,
      faceConfThresh: faceConfThresh,
      nmsThresh: nmsThresh,
      backend: backend
    });
    this.faceMeshModel = new FaceMeshModel({
      modelPath: meshModelPath,
      backend: backend
    });
    this.backend = backend || "webgl";
    this.imageSize = imageSize;
    this.landmarkThresh = landmarkThresh;
    this.currentFrameTensor = null;
    this.lastFaceBox = null;
    this.isModelReady = false;
  }

  /**
   * @returns {Promise<void>}
   */
  async init() {
    await tf.setBackend(this.backend);
    await this.faceDetectionModel.loadModel();
    await this.faceMeshModel.loadModel();
    const preload_time = performance.now();
    console.log('start to preload model');
    await this.preloadEmptyFrame();
    console.log('preload model cost = ', performance.now() - preload_time);
    this.isModelReady = true;
  }

  setImageSize(imageSize) {
    this.imageSize = imageSize;
    this.faceDetectionModel.imageSize = imageSize;
  }

  /**
   * Warm up the models once with an empty frame.
   * @returns {Promise<number>}
   */
  async preloadEmptyFrame() {
    const frameWidth = this.imageSize[0];
    const frameHeight = this.imageSize[1];
    const bufferSize = frameWidth * frameHeight * 4;
    const imageTensor = newFrameTensor(new Uint8ClampedArray(bufferSize), frameWidth, frameHeight, 3);
    if (this.faceDetectionModel) {
      await this.faceDetectionModel.executeFaceDetection(imageTensor);
    }
    if (this.faceMeshModel) {
      await this.faceMeshModel.executeFaceMesh(imageTensor, [0, 0, frameWidth, frameHeight]);
    }
    imageTensor.dispose();
    return 0;
  }

  destroy() {
    try {
      tf.dispose();
      tf.disposeVariables();
      if (this.faceDetectionModel) {
        this.faceDetectionModel.destroy();
      }
      if (this.faceMeshModel) {
        this.faceMeshModel.destroy();
      }
    } catch (e) {
      console.log("failed to release faceEngine, error = ", e);
    }
  }

  rotateImageTensor(tensor, radian) {
    return tf.tidy(() => {
      const inputTensor = tf.expandDims(tensor, 0);
      const outputTensor = tf.image.rotateWithOffset(inputTensor, radian);
      return outputTensor;
    });
  }

  calAngle(pLeftEye, pRightEye) {
    if (pLeftEye && pRightEye) {
      const leftX = pLeftEye[0];
      const leftY = pLeftEye[1];
      const rightX = pRightEye[0];
      const rightY = pRightEye[1];
      const angle = Math.atan((rightY - leftY) / (rightX - leftX)) * 180 / Math.PI;
      return angle;
    } else {
      return null;
    }
  }

  /**
   * @param frame
   * @param constrainsOptions
   * @returns {Promise<{bbox: *, confidence: *, landmarks: *, time: *}>}
   */
  async handleImageFrame(frame, constrainsOptions) {
    this.startTime = performance.now();
    this.faceDetectionBox = null;
    this.faceDetectionKeypoints = null;
    const frameWidth = this.imageSize[0];
    const frameHeight = this.imageSize[1];
    if (!frame) {
      return this.sendErrorAndDisposeTensor("input frame error");
    }
    if (!this.isModelReady) {
      return this.sendErrorAndDisposeTensor("model is not ready");
    }
    this.currentFrameTensor = newFrameTensor(frame, frameWidth, frameHeight, 3);
    let landmarkFaceBox = null;
    // Decide whether face detection has to run on this frame, and pick the
    // face box the landmark model will use.
    if (!this.lastFaceBox) {
      const detectionResult = await this.faceDetectionModel.executeFaceDetection(this.currentFrameTensor);
      const faceBox = detectionResult[0];
      this.faceDetectionBox = faceBox;
      this.faceDetectionKeypoints = detectionResult[1];
      if (this.faceDetectionKeypoints) {
        this.calAngle(this.faceDetectionKeypoints[0], this.faceDetectionKeypoints[1]);
      }
      if (!faceBox) {
        return this.sendResultAndDisposeTensor(null, null, -1, "no face");
      } else {
        landmarkFaceBox = faceBox;
      }
    } else {
      landmarkFaceBox = this.lastFaceBox;
    }
    let facePreprocessResult = null;
    if (constrainsOptions) {
      facePreprocessResult = checkFace(this.currentFrameTensor, landmarkFaceBox, this.imageSize, constrainsOptions);
    }
    const meshResult = await this.faceMeshModel.executeFaceMesh(this.currentFrameTensor, landmarkFaceBox);
    const landmarksConfidence = meshResult[0];
    const landmarksPosition = meshResult[1];
    // Everything below runs on the CPU over plain typed arrays.
    const filterResult = this.faceMeshModel.decode(landmarksPosition, landmarkFaceBox);
    const landmarks = filterResult[0];
    const landmarksFaceBox = filterResult[1];
    // Update the face box for the next frame based on the landmark confidence.
    // If confidence is below the threshold, clear the box so that the next
    // frame runs face detection again.
    const finalFaceBox = this.updateLastFaceBox(landmarksFaceBox, landmarkFaceBox, landmarksConfidence, this.landmarkThresh);
    this.lastFaceBox = finalFaceBox;
    if (this.faceDetectionBox) {
      // prefer the box that came from an actual detection pass
      return this.sendResultAndDisposeTensor(
        this.faceDetectionBox,
        landmarks,
        landmarksConfidence,
        '检测成功', facePreprocessResult);
    } else {
      // otherwise return the tracked face box
      return this.sendResultAndDisposeTensor(
        finalFaceBox,
        landmarks,
        landmarksConfidence,
        '检测成功', facePreprocessResult);
    }
  }

  updateLastFaceBox(faceBox, lastFaceBox, confidence, threshold) {
    let newFaceBox;
    let xMax, yMax;
    [xMax, yMax] = this.imageSize;
    if (confidence > threshold) {
      let x_1, y_1, x_2, y_2;
      [x_1, y_1, x_2, y_2] = faceBox;
      // expand the box outward by a margin
      if (lastFaceBox) {
        let x_11, y_11, x_22, y_22;
        [x_11, y_11, x_22, y_22] = lastFaceBox;
        const w = x_22 - x_11;
        const h = y_22 - y_11;
        const center_x = (x_1 + x_2) / 2;
        const center_y = (y_1 + y_2) / 2;
        x_1 = Math.round(Math.max(center_x - w / 2, 0));
        x_2 = Math.round(Math.min(center_x + w / 2, xMax));
        y_1 = Math.round(Math.max(center_y - h / 2, 0));
        y_2 = Math.round(Math.min(center_y + h / 2, yMax));
      } else {
        const w = x_2 - x_1;
        const h = y_2 - y_1;
        x_1 = Math.round(Math.max(x_1 - w / 5, 0));
        x_2 = Math.round(Math.min(x_2 + w / 5, xMax));
        y_1 = Math.round(Math.max(y_1 - h / 3, 0));
        y_2 = Math.round(Math.min(y_2 + h / 3, yMax));
      }
      newFaceBox = [x_1, y_1, x_2, y_2];
    } else {
      newFaceBox = null;
    }
    return newFaceBox;
  }

  /**
   * Build the output dictionary.
   * @param bbox face box array
   * @param landmarks 2D landmark array
   * @param confidence landmark confidence
   * @param message status message
   * @param optionsMessage result of the optional quality checks
   * @returns {{bbox: *, confidence: *, landmarks: *, time: *, message: *, keypoints: *}}
   */
  sendResultAndDisposeTensor(bbox, landmarks, confidence, message, optionsMessage = null) {
    if (this.currentFrameTensor) {
      this.currentFrameTensor.dispose();
      this.currentFrameTensor = null;
    }
    return {
      bbox: bbox, landmarks: landmarks, confidence: confidence,
      time: performance.now() - this.startTime, message: message,
      keypoints: this.faceDetectionKeypoints, optionsMessage: optionsMessage
    };
  }

  sendErrorAndDisposeTensor(error_message) {
    return this.sendResultAndDisposeTensor(null, null, -1, error_message);
  }
}

export default FaceEngine;
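
Because the engine mixes tf.tidy() with manual dispose() calls, it is easy to leak a tensor when the code changes. A small development-only probe, using tf.memory() from tfjs-core: the live tensor count should stay stable across steady-state frames.

import * as tf from '@tensorflow/tfjs-core';

// Development-only leak probe. `frame` is the raw RGBA pixel buffer the
// page normally passes to handleImageFrame.
export async function checkFrameLeaks(engine, frame) {
  const before = tf.memory().numTensors;
  const result = await engine.handleImageFrame(frame);
  const after = tf.memory().numTensors;
  if (after > before) {
    console.warn(`possible tensor leak: ${before} -> ${after}`);
  }
  return result;
}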

FaceMeshModel.js

import * as tf from '@tensorflow/tfjs-core'
import {load_model} from "./ModelUtils";

const MESH_IMAGE_SIZE = 192; // input tensor size of the landmark model: [192, 192]
// landmark ids used to compute a tight face box for the next frame
const id_list = [119, 348, 129, 358, 69, 299, 189, 413, 105, 52, 334,
  282, 70, 46, 300, 276, 230, 450, 9, 35, 265, 139, 368,
  107, 55, 336, 285, 207, 427, 57, 287, 18, 164];

export class FaceMeshModel {
  constructor({
                modelPath
              }) {
    this.modelPath = modelPath || null;
    this.model = null;
  }

  destroy() {
    tf.dispose();
    tf.disposeVariables();
    if (this.model) {
      this.model = null;
    }
  }

  async loadModel() {
    this.model = await load_model(this.modelPath);
  }

  /**
   * Predict face landmarks.
   * @param inputTensor image tensor
   * @param faceBox face box
   * @returns {Promise<*[]>} confidence and raw (not yet decoded) landmarks
   */
  async executeFaceMesh(inputTensor, faceBox) {
    let x_1, y_1, x_2, y_2;
    [x_1, y_1, x_2, y_2] = faceBox;
    let w_c = x_2 - x_1;
    let h_c = y_2 - y_1;
    const meshResult = this.predict(inputTensor, x_1, y_1, w_c, h_c, MESH_IMAGE_SIZE);
    const landmarksPosTensor = meshResult[0];
    const landmarksConfTensor = meshResult[1];
    // dataSync() is synchronous, no await needed
    const landmarksPos = landmarksPosTensor.dataSync();
    const temp = landmarksConfTensor.dataSync();
    const landmarksConf = temp[0];
    // the output tensors are no longer needed and must be disposed manually
    landmarksPosTensor.dispose();
    landmarksConfTensor.dispose();
    return [landmarksConf, landmarksPos];
  }

  /**
   * Run the landmark model on a face crop; outputs are still in crop coordinates.
   * @param imageTensor
   * @param x_1
   * @param y_1
   * @param w_c
   * @param h_c
   * @param resizedSize
   * @returns {Tensor<Rank> | Tensor[] | NamedTensorMap}
   */
  predict(imageTensor, x_1, y_1, w_c, h_c, resizedSize) {
    return tf.tidy(() => {
      const croppedImg = tf.slice(imageTensor, [y_1, x_1, 0], [h_c, w_c, 3]);
      const croppedResizedImg = tf.image.resizeBilinear(croppedImg, [resizedSize, resizedSize]);
      const inputTensor = tf.expandDims(tf.div(croppedResizedImg, 255.0));
      const preds = this.model.predict(inputTensor);
      return preds;
    });
  }

  /**
   * Map raw landmark output back to image coordinates and compute a tight
   * face box from the landmarks listed in id_list.
   */
  decode(preds, faceBox) {
    let w, h, x_1, y_1, x_2, y_2;
    [x_1, y_1, x_2, y_2] = faceBox;
    w = x_2 - x_1;
    h = y_2 - y_1;
    let landmarks = new Array(468);
    let pnts = preds;
    let x_min = x_1 + w;
    let x_max = -1;
    let y_min = y_1 + h;
    let y_max = -1;
    let index = 0;
    for (let i = 0; i < pnts.length; i += 3) {
      let xp = pnts[i] * w / MESH_IMAGE_SIZE + x_1;
      let yp = pnts[i + 1] * h / MESH_IMAGE_SIZE + y_1;
      if (id_list.indexOf(index) != -1) {
        x_min = Math.min(x_min, xp);
        y_min = Math.min(y_min, yp);
        x_max = Math.max(x_max, xp);
        y_max = Math.max(y_max, yp);
      }
      landmarks[index] = [xp, yp];
      index = index + 1;
    }
    return [landmarks, [x_min, y_min, x_max, y_max]];
  }
}

export default FaceMeshModel;
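
The demo page in section 3 calls a commonUtils.checkMouthOpen(landmarks, threshold) helper that the article does not include. A plausible sketch, assuming the 468 decoded landmarks follow the MediaPipe Face Mesh indexing (13 = upper inner lip, 14 = lower inner lip; verify the indices against your model):

// Hypothetical helper; indices assume MediaPipe Face Mesh topology.
export function checkMouthOpen(landmarks, threshold = 10) {
  if (!landmarks || !landmarks[13] || !landmarks[14]) {
    return false;
  }
  const [, upperY] = landmarks[13]; // upper inner lip
  const [, lowerY] = landmarks[14]; // lower inner lip
  // open if the lip gap (in image pixels) exceeds the threshold
  return Math.abs(lowerY - upperY) > threshold;
}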

ModelUtils.js

import * as tf from "@tensorflow/tfjs-core";
import * as tfc from "@tensorflow/tfjs-converter";
import {assert} from "@tensorflow/tfjs-core/dist/util_base";

export function newFrameTensor(frame, width, height, channel) {
  return tf.browser.fromPixels(
    {
      data: new Uint8Array(frame),
      width: width,
      height: height,
    }, channel
  );
}

export async function load_model(model_url) {
  console.log('start load model, url = ', model_url);
  if (!model_url) {
    throw new Error("detection_model can't be empty");
  }
  try {
    let model = await tfc.loadGraphModel(model_url);
    console.log("load model success, path = ", model_url);
    return model;
  } catch (e) {
    console.log("load model failed, err = ", e);
    return null;
  }
}

export function rgb2gray(imageTensor) {
  const [r, g, b] = tf.split(imageTensor, 3, 2);
  // Rec.601 luma weights: 0.299 R + 0.587 G + 0.114 B
  let grayTensor = tf.addN([tf.mul(r, 0.299), tf.mul(g, 0.587), tf.mul(b, 0.114)]);
  // let output = tf.squeeze(grayTensor);
  return grayTensor;
}

export function SMD2(grayTensor) {
  // SMD2 focus measure: mean of |f(x,y)-f(x+1,y)| * |f(x,y)-f(x,y+1)|
  const gray = grayTensor;
  let [h, w] = gray.shape;
  let img_left = tf.slice(gray, [1, 0], [h - 1, w - 1]);
  let img_right = tf.slice(gray, [1, 1], [h - 1, w - 1]);
  let img_up = tf.slice(gray, [1, 1], [h - 1, w - 1]);
  let img_bottom = tf.slice(gray, [0, 1], [h - 1, w - 1]);
  let diff_lr = tf.abs(tf.sub(img_left, img_right));
  let diff_ub = tf.abs(tf.sub(img_up, img_bottom));
  let vals = tf.mul(diff_lr, diff_ub);
  let smd2 = tf.mean(vals);
  return smd2;
}

export function getBrightTensor(grayTensor) {
  const gray = grayTensor;
  let bright = tf.mean(gray);
  return bright;
}

export function getBright(rgb_data, width, height) {
  let bright = 0;
  for (let row = 0; row < height; row++) {
    for (let col = 0; col < width; col++) {
      let r = rgb_data[3 * (row * width + col) + 0];
      let g = rgb_data[3 * (row * width + col) + 1];
      let b = rgb_data[3 * (row * width + col) + 2];
      bright += 0.2989 * r + 0.5870 * g + 0.1140 * b;
    }
  }
  const count = height * width;
  bright /= count;
  return bright;
}

export function getFacePosition(faceBox, imageSize, thresholds) {
  const minDistance = thresholds.minDistance;
  const maxDistance = thresholds.maxDistance;
  const xThreshold = thresholds.xThreshold;
  const yThreshold = thresholds.yThreshold;
  const width = imageSize[0];
  const height = imageSize[1];
  const imgXCenter = width / 2;
  const imgYCenter = height / 2;
  let x_1, y_1, x_2, y_2;
  [x_1, y_1, x_2, y_2] = faceBox;
  const faceXCenter = (x_2 + x_1) / 2;
  const faceYCenter = (y_2 + y_1) / 2;
  const distanceValue = (x_2 - x_1) / width;
  let isDistanceMatch, distanceTip;
  if (distanceValue < minDistance) {
    isDistanceMatch = false;
    distanceTip = "太远"; // too far
  } else if (distanceValue > maxDistance) {
    isDistanceMatch = false;
    distanceTip = "太近"; // too close
  } else {
    isDistanceMatch = true;
    distanceTip = "合适"; // just right
  }
  const deltaX = (faceXCenter - imgXCenter);
  const deltaY = (faceYCenter - imgYCenter);
  let isPositionMatch, positionTips, positionTips2;
  if (Math.abs(deltaX) >= xThreshold) {
    isPositionMatch = false;
    positionTips = deltaX > 0 ? "请向左移动" : "请向右移动"; // move left / move right
    positionTips2 = deltaX > 0 ? "偏右" : "偏左"; // too far right / too far left
  } else if (Math.abs(deltaY) >= yThreshold) {
    isPositionMatch = false;
    positionTips = deltaY > 0 ? "请向上移动" : "请向下移动"; // move up / move down
    positionTips2 = deltaY > 0 ? "偏下" : "偏上"; // too low / too high
  } else {
    isPositionMatch = true;
    positionTips = "位置合适"; // position ok
    positionTips2 = "合适";
  }
  return {
    isPositionMatch: isPositionMatch,
    positionTips: positionTips,
    positionTips2: positionTips2,
    isDistanceMatch: isDistanceMatch,
    distanceTip: distanceTip,
    distanceValue: distanceValue
  };
}

/**
 * Run the configured image-quality checks on the face crop.
 * @param inputTensor
 * @param faceBox
 * @param imageSize
 * @param constrainsOptions
 * @returns {{}}
 */
export function checkFace(inputTensor, faceBox, imageSize, constrainsOptions) {
  assert(constrainsOptions);
  let x_1, y_1, x_2, y_2;
  [x_1, y_1, x_2, y_2] = faceBox;
  let w_c = x_2 - x_1;
  let h_c = y_2 - y_1;
  return tf.tidy(() => {
    let finalResult = {};
    const croppedImg = tf.slice(inputTensor, [y_1, x_1, 0], [h_c, w_c, 3]);
    let positionResult = {};
    let brightResult = {};
    let clarifyResult = {};
    if (constrainsOptions.positionConstrains) {
      positionResult = getFacePosition(faceBox, imageSize, constrainsOptions.positionConstrains);
      finalResult['positionConstrains'] = positionResult;
    }
    if (constrainsOptions.brightnessConstrains || constrainsOptions.clarifyConstrains) {
      let grayTensor = rgb2gray(croppedImg);
      if (constrainsOptions.brightnessConstrains) {
        assert(constrainsOptions.brightnessConstrains.minValue && constrainsOptions.brightnessConstrains.maxValue);
        const bright = getBrightTensor(grayTensor).arraySync();
        brightResult['brightnessValue'] = Math.floor(bright);
        let isBrightMatch = false;
        let brightTips;
        if (bright < constrainsOptions.brightnessConstrains.minValue) {
          brightTips = '过暗'; // too dark
        } else if (bright > constrainsOptions.brightnessConstrains.maxValue) {
          brightTips = '过高'; // too bright
        } else {
          isBrightMatch = true;
          brightTips = '合适'; // just right
        }
        brightResult['brightTips'] = brightTips;
        brightResult['isBrightMatch'] = isBrightMatch;
        finalResult['brightnessConstrains'] = brightResult;
      }
      if (constrainsOptions.clarifyConstrains) {
        assert(constrainsOptions.clarifyConstrains.minValue);
        const clarify = SMD2(grayTensor).arraySync();
        clarifyResult['clarifyValue'] = Math.floor(clarify);
        let isClarifyMatch = false;
        let clarifyTips;
        if (clarify < constrainsOptions.clarifyConstrains.minValue) {
          clarifyTips = '清晰度过低'; // too blurry
        } else {
          isClarifyMatch = true;
          clarifyTips = '清晰度合适'; // sharp enough
        }
        clarifyResult['clarifyTips'] = clarifyTips;
        clarifyResult['isClarifyMatch'] = isClarifyMatch;
        finalResult['clarifyConstrains'] = clarifyResult;
      }
    }
    return finalResult;
  });
}
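
A short sketch of how the quality helpers compose. The input is an [H, W, 3] RGB tensor; the thresholds are illustrative, not values from the article:

import * as tf from '@tensorflow/tfjs-core';
import {rgb2gray, SMD2, getBrightTensor} from './ModelUtils';

function measureQuality(imageTensor) {
  return tf.tidy(() => {
    const gray = rgb2gray(imageTensor);
    const brightness = getBrightTensor(gray).arraySync(); // mean gray level, 0..255
    const sharpness = SMD2(gray).arraySync();             // SMD2 focus measure
    return {brightness, sharpness};
  });
}

// Example (illustrative thresholds): reject frames that are too dark or blurry.
// const {brightness, sharpness} = measureQuality(frame);
// const ok = brightness > 60 && sharpness > 4;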

index.js

import {FaceEngine} from "./FaceEngine";
import {FaceMeshModel} from "./FaceMeshModel";
import {FaceDetectionModel} from "./FaceDetectionModel";

export {FaceEngine, FaceMeshModel, FaceDetectionModel};
export default {FaceEngine, FaceMeshModel, FaceDetectionModel};

3. Using the model in a page

<template>
  <div class="main_container">
    <div class="ai-diagnosis-wrapper" v-if="pageState==0">
      <img class="zb-bg" src="@/assets/camera/zb-bg.jpg" alt=""/>
      <div class="adw-btn gray" v-if="modelLoading">模型加载中...</div>
      <div class="adw-btn" v-if="!modelLoading" @click="showCamera">开始</div>
    </div>
    <div class="video-box" v-show="pageState == 1">
      <video class="video" id="video" ref="video" playsinline autoplay muted></video>
      <div class="video-mask">
        <img class="circle-img" src="@/assets/camera/animate-camera-circle.png"/>
      </div>
      <canvas id="imgCanvas"></canvas>
      <canvas class="canvas_render" id="drawerCanvas" ref="canvasRenderRef"></canvas>
      <div class="mantle-box">
        <div class="message-tips">
          <p class="message-tips-p">{{ faceDetect ? "识别成功" : faceDetectorTips }}</p>
        </div>
        <ul class="photo-env">
          <li>
            <img class="icon" v-show="!detectStatus" src="@/assets/camera/gy-1.png"/>
            <img class="icon" v-show="detectStatus && detectBright === 0" src="@/assets/camera/gy-3.png"/>
            <img class="icon" v-show="detectStatus && detectBright !== 0" src="@/assets/camera/gy-2.png"/>
            <div class="r-box">
              <p class="p-1">拍照光源</p>
              <p class="p-2">{{detectBright !== -1 ? brightTxt[detectBright] : (!detectStatus ? '待检' : '无人脸')}}</p>
            </div>
          </li>
          <li>
            <img class="icon" v-show="!detectStatus" src="@/assets/camera/wz-1.png"/>
            <img class="icon" v-show="detectStatus && (detectPosition === 0 && detectDistance === 0)" src="@/assets/camera/wz-3.png"/>
            <img class="icon" v-show="detectStatus && (detectPosition !== 0 || detectDistance !== 0)" src="@/assets/camera/wz-2.png"/>
            <div class="r-box">
              <p class="p-1">人脸位置</p>
              <p class="p-2">{{detectDistance !== -1 ? (detectDistance === 0 ? positionTxt[detectPosition] : distanceTxt[detectDistance]) : (!detectStatus ? '待检' : '无人脸')}}</p>
            </div>
          </li>
          <li>
            <img class="icon" v-show="!detectStatus" src="@/assets/camera/jd-1.png"/>
            <img class="icon" v-show="detectStatus && detectPosture === 0" src="@/assets/camera/jd-3.png"/>
            <img class="icon" v-show="detectStatus && detectPosture !== 0" src="@/assets/camera/jd-2.png"/>
            <div class="r-box">
              <p class="p-1">人脸角度</p>
              <p class="p-2">{{detectPosture !== -1 ? postureTxt[detectPosture] : (!detectStatus ? '待检' : '无人脸')}}</p>
            </div>
          </li>
        </ul>
      </div>
    </div>
  </div>
</template>
<script>
// Importing @tensorflow/tfjs has the side effect of registering the
// webgl/cpu backends that FaceEngine needs; tf itself is not used directly.
import * as tf from "@tensorflow/tfjs"
import { positionMsg, positionTxt, brightTxt, distanceTxt, brightDetailTxt, postureTxt } from '@/utils/utils.js'
import {FaceEngine} from '@/detector/index';
// The following helpers are used below but were not shown in the article;
// the import paths are assumptions, adjust them to your project.
import { Toast } from 'vant';
import * as modelUtils from '@/utils/modelUtils';
import * as commonUtils from '@/utils/commonUtils';
import { uploadImage } from '@/api/upload';

export default {
  name: 'Cameraface',
  data () {
    return {
      pageState: 0,
      modelLoading: true, // model loading flag
      detectStatus: false,
      getImageDataing: false,
      startTime: 0, // minimum interval between two detections, to save resources
      face_detect_model: '',
      faceDetect: false,
      faceDetectSuccess: -1, // [-1, 0, 1]
      runnerTimeout: '',
      detectBright: -1, // brightness
      detectPosition: -1, // position offset
      detectDistance: -1, // distance offset
      detectPosture: -1, // face posture
      faceDetectorTips: '请保持人脸在采集框中',
      videoWidth: 0,
      videoHeight: 0,
      // canvas / video objects
      videoObj: '',
      canvasObj: '',
      context: '',
      streaming: false, // whether media capture has started
      mediaStreamTrack: '',
      distanceTxt: distanceTxt,
      positionTxt: positionTxt,
      brightDetailTxt: brightDetailTxt,
      brightTxt: brightTxt,
      positionMsg: positionMsg,
      postureTxt: postureTxt,
      sliceFlag: false,
      isUploading: false,
      isUploadSuccess: false,
    }
  },
  created () {
    this.loadModel()
  },
  mounted() {
    if (!window.URL) {
      window.URL = window.URL || window.webkitURL || window.msURL || window.oURL;
    }
    this.videoObj = this.$refs.video;
    this.videoObj.removeAttribute("controls");
    this.canvasObj = document.getElementById("imgCanvas"); // canvas used to grab frame data
    this.context = this.canvasObj.getContext('2d');
    this.drawerCanvasObj = document.getElementById("drawerCanvas"); // canvas used to draw detection results
    this.drawerCanvasCtx = this.drawerCanvasObj.getContext('2d');
  },
  unmounted() { // Vue 3
    this.hideCamera(true);
  },
  beforeDestroy() { // Vue 2
    this.hideCamera(true);
  },
  methods: {
    async loadModel() {
      console.log('model load start!');
      this.frameCanvas = document.createElement('canvas');
      this.$toast.loading({
        duration: 0,
        message: "模型加载中",
        forbidClick: true,
      });
      this.pageState = 0;
      this.modelLoading = true;
      this.isUploading = false;
      this.isUploadSuccess = false;
      /** SDK: load the models */
      this.face_detect_model = new FaceEngine({
        detectionModelPath: 'xxx/FaceDetectionModel.json',
        meshModelPath: 'xxx/FaceMeshModel_v1.0.0.json',
        wasmPath: 'xxx/zyd_bg_1.wasm',
        cache: 'localstorage',
        backend: 'webgl'
      });
      await this.face_detect_model.init();
      this.$toast.clear();
      this.modelLoading = false;
      console.log('model load finished!');
    },
    getImageData () {
      let frameData = null;
      if (this.videoWidth && this.videoHeight) {
        // On PC, take a center crop: cut a canvas-sized frame out of the
        // larger video. e.g. the video is 960x640 while the canvas fills a
        // 375x667 viewport.
        let startX = 0;
        let startY = 0;
        if (this.sliceFlag) {
          startX = Math.floor(this.videoWidth / 2 - this.canvasObj.width / 2);
          startY = Math.floor(this.videoHeight / 2 - this.canvasObj.height / 2);
        }
        this.context.clearRect(0, 0, this.canvasObj.width, this.canvasObj.height);
        this.context.translate(this.canvasObj.width, 0);
        // The video preview is mirrored, so flip the canvas horizontally;
        // otherwise the left/right position hints come out reversed.
        this.context.scale(-1, 1);
        this.context.drawImage(this.videoObj, startX, startY, this.canvasObj.width, this.canvasObj.height, 0, 0, this.canvasObj.width, this.canvasObj.height);
        frameData = this.context.getImageData(0, 0, this.canvasObj.width, this.canvasObj.height);
        this.context.setTransform(1, 0, 0, 1, 0, 0);
      }
      this.detectStatus = true; // start detecting
      this.checkPhoto(frameData);
    },
    runnerFunction () {
      if (this.getImageDataing || this.showTip || this.faceDetect || new Date().getTime() - this.startTime < 600) {
        // skip this frame
      } else {
        this.getImageDataing = true;
        this.getImageData();
        this.startTime = new Date().getTime();
      }
      this.runnerTimeout = requestAnimationFrame(this.runnerFunction);
    },
    /**
     * Check one video frame.
     */
    checkPhoto(frame) {
      if (frame) {
        this.facesModelDetector(frame)
      } else {
        this.continueDetect()
      }
    },
    facesModelDetector(frame) {
      if (!this.face_detect_model) {
        this.startFaceDetectOver()
        return
      }
      /** SDK: run detection */
      this.face_detect_model.handleImageFrame(frame, modelUtils.getDefaultCameraConstrains(), false).then(result => {
        const bbox = result.bbox;
        console.log("detection result:", result)
        if (bbox) {
          this.drawerCanvasCtx.clearRect(0, 0, this.drawerCanvasObj.width, this.drawerCanvasObj.height)
          this.drawPoints(this.drawerCanvasCtx, [bbox[0], bbox[1]], 25, "red")
          this.drawPoints(this.drawerCanvasCtx, [bbox[2], bbox[3]], 25, "red")
          if (commonUtils.checkMouthOpen(result.landmarks, 10)) {
            this.faceDetectorTips = "请不要张嘴"
          } else {
            if (this.checkImg(result.optionsMessage)) {
              // position, distance and sharpness are all stable
              console.log('all quality checks passed')
              this.faceDetect = true
              this.faceDetectorTips = ""
              Toast('采集成功')
              // this.stopDetect();
              // verify that the canvas image matches what the video shows
              let imgUrl = this.canvasObj.toDataURL('image/jpeg')
              // Debug helper: download the captured frame.
              // let alink = document.createElement('a');
              // alink.download = 'face' + new Date().getTime()
              // alink.href = imgUrl
              // document.body.appendChild(alink)
              // alink.click()
              // document.body.removeChild(alink)
              this.upload(this.dataURLtoBlob(imgUrl));
              return;
            } else {
              this.continueDetect()
            }
          }
          this.continueDetect()
        } else {
          this.startFaceDetectOver()
        }
      }).catch(err => {
        console.log('face detection failed', err)
        this.startFaceDetectOver()
      });
    },
    checkImg(result) {
      // Note: this page expects an extended SDK build whose checkFace also
      // returns clarify/color/posture results and *TipsVal codes; the lib
      // code in section 2 only produces the position and brightness parts.
      if (result && result.positionConstrains && result.brightnessConstrains) {
        let position = -1, distance = -1, bright = -1, clarify = -1, colorCast = -1, posture = -1,
          btip = '', dtip = '', ptip = '', postip = '', postureDict;
        bright = result.brightnessConstrains.brightTipsVal // 0 ok, 1 too bright, 2 too dark, 3 uneven
        distance = result.positionConstrains.distanceTipsVal // 0 ok, 1 too close, 2 too far
        position = result.positionConstrains.positionTipsVal // 0 ok, 1 too low, 2 too high, 3 too far left, 4 too far right
        clarify = result.clarifyConstrains.clarifyTipsVal
        colorCast = result.colorConstrains.colorCastTipsVal
        posture = result.postureConstrains.postureTipsVal // 0 facing straight ahead, 1 face the camera, 2 don't tilt your head, 3 don't look up, 4 don't look down
        if (!result.brightnessConstrains.isBrightMatch) {
          // brightness out of range
          btip = result.brightnessConstrains.brightTips
        }
        if (!result.postureConstrains.isPostureMatch) {
          // posture out of range
          postip = result.postureConstrains.postureTips
        }
        if (!result.positionConstrains.isDistanceMatch) {
          dtip = result.positionConstrains.distanceTips
        }
        if (!result.positionConstrains.isPositionMatch) {
          ptip = result.positionConstrains.positionTips
        }
        this.detectPosition = position
        this.detectDistance = distance
        this.detectPosture = posture
        this.detectBright = bright
        this.detectClarify = clarify
        this.detectColorCast = colorCast
        this.faceDetectorTips = btip ? btip : (postip ? postip : (dtip ? dtip : ptip))
        this.postureDict = postureDict
        if (position === 0 && distance === 0 && clarify === 0 && colorCast === 0 && posture === 0) {
          return true // quality ok
        }
      }
      return false;
    },
    drawPoints(ctxDrawer, item, radius = 5, color = "white") {
      ctxDrawer.fillStyle = color
      ctxDrawer.beginPath();
      ctxDrawer.font = "16px sans-serif";
      ctxDrawer.arc(item[0], item[1], radius, 0, 2 * Math.PI, false);
      ctxDrawer.fill()
      ctxDrawer.closePath()
    },
    showCamera () {
      this.openCamera();
    },
    openCamera () {
      this.$toast.loading({
        duration: 0,
        message: "摄像头调用中",
        forbidClick: true,
      });
      // without HTTPS this fails, because navigator.mediaDevices is undefined
      if (navigator.mediaDevices.getUserMedia || navigator.getUserMedia || navigator.webkitGetUserMedia || navigator.mozGetUserMedia) {
        this.getUserMedia({
          video: {
            width: {min: 1152, ideal: 1344, max: 1920},
            height: {min: 648, ideal: 756, max: 1080},
            facingMode: 'user',
          }
        });
      } else {
        this.$toast.clear();
        setTimeout(() => {
          Toast('你的浏览器不支持访问用户媒体设备')
        })
        console.log("this browser does not support access to user media devices");
      }
    },
    getUserMedia(constrains) {
      /* getUserMedia with compatibility fallbacks */
      let that = this;
      if (navigator.mediaDevices.getUserMedia) {
        // the current standard API
        navigator.mediaDevices.getUserMedia(constrains).then(stream => {
          that.getUserMediaSuccess(stream);
        }).catch(err => {
          that.getUserMediaFail(err);
        });
      } else if (navigator.webkitGetUserMedia || navigator.mozGetUserMedia) {
        // WebKit/Gecko browsers with prefixed APIs
        if (navigator.mediaDevices === undefined) {
          navigator.mediaDevices = {};
        }
        // Some browsers partially implement mediaDevices. We can't assign
        // getUserMedia unconditionally, because that could overwrite existing
        // properties; only add it when it's missing.
        if (navigator.mediaDevices.getUserMedia === undefined) {
          navigator.mediaDevices.getUserMedia = function (constraints) {
            // grab the prefixed getUserMedia if there is one
            var getUserMedia = navigator.webkitGetUserMedia || navigator.mozGetUserMedia;
            // some browsers don't implement it at all; reject so the
            // interface stays consistent
            if (!getUserMedia) {
              return Promise.reject(new Error('getUserMedia is not implemented in this browser'));
            }
            // otherwise wrap the old callback-style API in a Promise
            return new Promise(function (resolve, reject) {
              getUserMedia.call(navigator, constraints, resolve, reject);
            });
          }
        }
        navigator.mediaDevices.getUserMedia(constrains).then(stream => {
          that.getUserMediaSuccess(stream);
        }).catch(err => {
          that.getUserMediaFail(err);
        });
      } else if (navigator.getUserMedia) {
        // legacy API: callback style, not Promise based
        navigator.getUserMedia(constrains,
          stream => that.getUserMediaSuccess(stream),
          err => that.getUserMediaFail(err));
      }
    },
    /* media stream acquired */
    getUserMediaSuccess (stream) {
      this.pageState = 1;
      this.$toast.clear();
      this.mediaStreamTrack = stream;
      // hand the captured stream to the video element directly;
      // window.URL.createObjectURL(stream) is deprecated for MediaStream
      this.videoObj.srcObject = stream;
      this.videoObj.play();
      // wait until the stream is ready to play
      this.videoObj.addEventListener('canplay', (ev) => {
        if (!this.streaming) {
          this.videoWidth = this.videoObj.videoWidth;
          this.videoHeight = this.videoObj.videoHeight;
          this.streaming = true;
          if (this.videoObj.videoWidth > this.videoObj.videoHeight) {
            this.sliceFlag = true
          }
          if (this.sliceFlag) {
            let imageWidth = window.innerWidth
            let imageHeight = window.innerHeight
            this.canvasObj.setAttribute('width', imageWidth);
            this.canvasObj.setAttribute('height', imageHeight);
            this.drawerCanvasObj.setAttribute('width', imageWidth);
            this.drawerCanvasObj.setAttribute('height', imageHeight);
            this.face_detect_model.setImageSize([imageWidth, imageHeight]) // viewport size for face detection
          } else {
            this.canvasObj.setAttribute('width', this.videoWidth);
            this.canvasObj.setAttribute('height', this.videoHeight);
            this.drawerCanvasObj.setAttribute('width', this.videoWidth);
            this.drawerCanvasObj.setAttribute('height', this.videoHeight);
            this.face_detect_model.setImageSize([this.videoWidth, this.videoHeight]) // viewport size for face detection
          }
          // start detecting
          this.startDetect()
        }
      }, false);
    },
    getUserMediaFail (err) {
      /* failed to acquire the media stream */
      this.$toast.clear();
      this.hideCamera(false);
      setTimeout(() => {
        Toast('请检查摄像头是否正常开启')
      })
    },
    hideCamera (isCloseCamera) {
      this.stopDetect();
      if (this.mediaStreamTrack) {
        this.mediaStreamTrack.getTracks()[0].stop(); // release the camera
      }
    },
    /* start detection */
    async startDetect () {
      console.log('start detecting')
      this.startTime = new Date().getTime();
      this.runnerTimeout = requestAnimationFrame(this.runnerFunction)
    },
    /* stop detection */
    stopDetect () {
      console.log('stop detecting')
      this.detectStatus = false
      cancelAnimationFrame(this.runnerTimeout);
    },
    startFaceDetectOver() {
      this.getImageDataing = false
      this.faceDetect = false
      this.detectBright = -1
      this.detectPosition = -1
      this.detectDistance = -1
      this.detectPosture = -1
      this.faceDetectorTips = '请保持人脸在采集框中'
    },
    continueDetect() {
      this.getImageDataing = false
    },
    toggleDetect() {
      if (this.detectStatus) {
        this.stopDetect()
      } else {
        this.startDetect()
      }
    },
    upload(imageData) {
      if (!this.isUploading) {
        console.log("calling the upload API");
        this.isUploading = true;
        uploadImage(imageData, this.onUploadSuccess, this.onUploadFailed);
        setTimeout(() => {
          this.pageState = 2;
          this.hideCamera(true);
        }, 3000);
      }
    },
    onUploadSuccess(response) {
      console.log('response = ');
      console.log(response);
      this.isUploading = false;
      if (response.data && response.data.code == 200) {
        this.isUploadSuccess = true;
        console.log('upload succeeded')
      } else {
        console.log('upload failed')
      }
    },
    onUploadFailed(error) {
      console.log('uploading image failed (network error), error = ', error);
    },
    dataURLtoBlob(dataurl) {
      let arr = dataurl.split(','), mime = arr[0].match(/:(.*?);/)[1],
        bstr = atob(arr[1]), n = bstr.length, u8arr = new Uint8Array(n);
      while (n--) {
        u8arr[n] = bstr.charCodeAt(n);
      }
      return new Blob([u8arr], {type: mime});
    },
  }
}
</script>
<style lang="less" scoped>
#imgCanvas {
  position: fixed;
  z-index: -99;
  display: block;
  left: 300%;
}
* {
  margin: 0;
  padding: 0;
  font-family: 'FZYANS_JW';
}
.main_container {
  position: fixed;
  top: 0;
  left: 0;
  right: 0;
  bottom: 0;
  z-index: 0;
  .ai-diagnosis-wrapper {
    position: absolute;
    overflow: hidden;
    width: 100%;
    height: 100%;
    background-color: #787a7a;
    display: flex;
    flex-direction: column;
    text-align: center;
    align-items: center;
    .zb-bg {
      display: block;
      width: 100%;
    }
  }
  .video-box {
    position: absolute;
    overflow: hidden;
    width: 100%;
    height: 100%;
    .video {
      position: absolute;
      display: block;
      width: 100%;
      height: 100%;
      background: black;
      transform: rotateY(180deg);
      -webkit-transform: rotateY(180deg); /* Safari and Chrome */
      -moz-transform: rotateY(180deg);
      object-fit: cover;
      z-index: 99;
    }
    .canvas_render {
      /*visibility: hidden;*/
      position: absolute;
      top: 0px;
      left: 0px;
      width: 100%;
      height: 100%;
      background: transparent;
      transform: rotateY(180deg);
      -webkit-transform: rotateY(180deg); /* Safari and Chrome */
      -moz-transform: rotateY(180deg);
      object-fit: cover;
      z-index: 110;
    }
    .video-mask {
      position: absolute;
      top: 1.47rem;
      left: 50%;
      width: 2.74rem;
      z-index: 99;
      transform: translateX(-50%);
      .circle-img {
        width: 100%;
        box-shadow: 0 0 100vh 100vh rgba(0, 0, 0, 0.50);
        border-radius: 100%;
      }
    }
    .mantle-box {
      z-index: 101;
      position: absolute;
      top: 0px;
      left: 0px;
      width: 100%;
      height: 100%;
      .message-tips {
        position: absolute;
        display: flex;
        align-items: center;
        justify-content: center;
        top: 1.25rem;
        width: 100%;
        object-fit: cover;
        .message-tips-img {
          position: absolute;
          width: 70%;
          z-index: 103;
        }
        .message-tips-p {
          position: absolute;
          flex: auto;
          font-family: PingFang SC;
          color: white;
          font-weight: bold;
          font-size: 0.2rem;
          z-index: 104;
        }
      }
      .photo-env {
        position: absolute;
        display: flex;
        align-items: center;
        justify-content: space-between;
        bottom: 0.5rem;
        width: 100%;
        padding: 0 0.3rem;
        box-sizing: border-box;
        li {
          width: 0.84rem;
          height: 0.765rem;
          background: rgba(0, 0, 0, 0.1);
          border-radius: 0.12rem;
          display: flex;
          align-items: center;
          flex-direction: column;
          text-align: center;
          .icon {
            flex-shrink: 1;
            width: 0.29rem;
          }
          .r-box {
            .p-1 {
              color: #C9C9C9;
              font-size: 0.13rem;
            }
            .p-2 {
              font-weight: 600;
              color: #ffffff;
              font-size: 0.15rem;
            }
          }
        }
      }
      .txt {
        margin-top: 0.25rem;
        text-align: center;
        font-weight: 600;
        color: white;
        font-size: 0.16rem;
      }
      .tip {
        width: 100%;
        box-sizing: border-box;
        z-index: 101;
        position: absolute;
        bottom: 0.07rem;
        left: 0;
        color: white;
        font-size: 0.12rem;
        text-align: center;
      }
    }
  }
}
.adw-btn {
  width: 2.68rem;
  height: 0.502rem;
  background-image: url("../assets/home/btn-bg.png");
  background-size: 100%;
  text-align: center;
  line-height: 0.502rem;
  position: fixed;
  bottom: 0.42rem;
  left: 50%;
  transform: translateX(-50%);
  font-size: 0.22rem;
  color: #FFFFFF;
  font-weight: bold;
  &.gray {
    opacity: 0.4;
  }
}
</style>
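
The page calls uploadImage(imageData, onSuccess, onFailed) without showing it. A hedged sketch of such a helper; the endpoint URL and form field name are placeholders, and the wrapper shapes its callback argument so that the page's response.data.code check works against a server returning {code: 200}:

// Hypothetical upload helper; endpoint and form field are placeholders.
export function uploadImage(blob, onSuccess, onFailed) {
  const form = new FormData();
  form.append('file', blob, `face_${Date.now()}.jpg`);
  fetch('/api/face/upload', {method: 'POST', body: form})
    .then(res => res.json())
    .then(data => onSuccess({data})) // page expects response.data.code
    .catch(onFailed);
}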
