
tfjs posenet

getAdjacentKeyPoints


tfjs-models

https://github.com/tensorflow/tfjs-models/tree/master/posenet

react

https://github.com/jscriptcoder/tfjs-posenet


utils.js

import * as posenet from '@tensorflow-models/posenet';

function isAndroid() {
  return /Android/i.test(navigator.userAgent);
}

function isiOS() {
  return /iPhone|iPad|iPod/i.test(navigator.userAgent);
}

export function isMobile() {
  return isAndroid() || isiOS();
}

function toTuple({y, x}) {
  return [y, x];
}

// Draw the bounding box returned by posenet.getBoundingBox around all keypoints.
export function drawBoundingBox(keypoints, ctx, boundingBoxColor = 'blue') {
  const boundingBox = posenet.getBoundingBox(keypoints);
  ctx.beginPath(); // start a fresh path so boxes from earlier frames do not accumulate
  ctx.rect(
      boundingBox.minX, boundingBox.minY,
      boundingBox.maxX - boundingBox.minX,
      boundingBox.maxY - boundingBox.minY);
  ctx.strokeStyle = boundingBoxColor;
  ctx.stroke();
}

// Draw a line segment between two keypoint positions, given as [y, x] tuples.
export function drawSegment([ay, ax], [by, bx], color, scale, ctx, lineWidth = 2) {
  ctx.beginPath();
  ctx.moveTo(ax * scale, ay * scale);
  ctx.lineTo(bx * scale, by * scale);
  ctx.lineWidth = lineWidth;
  ctx.strokeStyle = color;
  ctx.stroke();
}

/**
 * Draws a pose skeleton by looking up all adjacent keypoints/joints.
 */
export function drawSkeleton(keypoints, minConfidence, ctx, scale = 1, color = 'red') {
  const adjacentKeyPoints =
      posenet.getAdjacentKeyPoints(keypoints, minConfidence);
  adjacentKeyPoints.forEach((keypoints) => {
    drawSegment(
        toTuple(keypoints[0].position), toTuple(keypoints[1].position), color,
        scale, ctx);
  });
}

export function drawPoint(ctx, y, x, r, color) {
  ctx.beginPath();
  ctx.arc(x, y, r, 0, 2 * Math.PI);
  ctx.fillStyle = color;
  ctx.fill();
}

// Draw a dot for every keypoint whose score passes minConfidence.
export function drawKeypoints(keypoints, minConfidence, ctx, scale = 1, color = 'red') {
  for (let i = 0; i < keypoints.length; i++) {
    const keypoint = keypoints[i];
    if (keypoint.score < minConfidence) {
      continue;
    }
    const {y, x} = keypoint.position;
    drawPoint(ctx, y * scale, x * scale, 3, color);
  }
}
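
For reference, `posenet.getAdjacentKeyPoints` takes a pose's keypoints plus a confidence threshold and returns pairs of keypoints that are connected in the skeleton; `drawSkeleton` simply draws one segment per pair. Below is a minimal sketch (not from the original post) of how these helpers could be called on a single estimated pose; `net`, `image`, the canvas id and the thresholds are placeholders.

// Sketch: draw one estimated pose onto a canvas using the helpers above.
// Assumes `net` is a loaded posenet model (0.x API) and `image` is an <img>/<video>/<canvas>.
async function drawOnePose(net, image) {
  const ctx = document.getElementById('pose').getContext('2d');
  const pose = await net.estimateSinglePose(image, 1, false, 16); // 0.x positional API

  // getAdjacentKeyPoints returns pairs of connected keypoints,
  // e.g. [[{part: 'leftShoulder', ...}, {part: 'leftElbow', ...}], ...]
  const pairs = posenet.getAdjacentKeyPoints(pose.keypoints, 0.5);
  console.log(pairs.map(([a, b]) => `${a.part} -> ${b.part}`));

  drawKeypoints(pose.keypoints, 0.5, ctx);  // dots for confident keypoints
  drawSkeleton(pose.keypoints, 0.5, ctx);   // segments between adjacent pairs
  drawBoundingBox(pose.keypoints, ctx);     // box around all keypoints
}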

vue

<template>
  <div>
    <video id="video" class="video"></video>
    <canvas id="pose" class="pose"></canvas>
  </div>
</template>

<script>
import * as posenet from '@tensorflow-models/posenet';
import {isMobile, drawKeypoints, drawSkeleton, drawBoundingBox} from './utils';

// vConsole gives an on-screen console for debugging on mobile browsers.
let VConsole = require('vconsole/dist/vconsole.min.js');
let vConsole = new VConsole();

const imageScaleFactor = 1;
const outputStride = 16;
const flipHorizontal = false;
const videoWidth = 500;
const videoHeight = 500;

// Request the front-facing camera and bind the stream to the <video> element.
async function setupCamera() {
  if (!navigator.mediaDevices || !navigator.mediaDevices.getUserMedia) {
    throw new Error(
        'Browser API navigator.mediaDevices.getUserMedia not available');
  }
  const video = document.getElementById('video');
  video.width = videoWidth;
  video.height = videoHeight;
  const mobile = isMobile(); // not used below; could be used to pick a smaller capture size on mobile
  const stream = await navigator.mediaDevices.getUserMedia({
    'audio': false,
    'video': {
      facingMode: 'user',
      width: videoWidth,
      height: videoHeight,
    },
  });
  video.srcObject = stream;
  // Resolve once metadata is loaded, so width/height are known before estimation.
  return new Promise((resolve) => {
    video.onloadedmetadata = () => {
      resolve(video);
    };
  });
}

async function loadVideo() {
  const video = await setupCamera();
  video.play();
  return video;
}

const minPoseConfidence = 0.1;
const minPartConfidence = 0.5;

let video = undefined;
let net = undefined;
let ctx = undefined;
let cvs = undefined;

// Per frame: estimate a single pose, copy the video frame to the canvas,
// then overlay keypoints, skeleton and bounding box.
async function draw_frame() {
  if (!video || !net || !ctx || !cvs)
    return;
  const pose = await net.estimateSinglePose(
      video, imageScaleFactor, flipHorizontal, outputStride);
  let {score, keypoints} = pose;
  requestAnimationFrame(draw_frame);
  ctx.drawImage(video, 0, 0, cvs.width, cvs.height);
  if (score >= minPoseConfidence) {
    drawKeypoints(keypoints, minPartConfidence, ctx);
    drawSkeleton(keypoints, minPartConfidence, ctx);
    drawBoundingBox(keypoints, ctx);
  }
}

export default {
  async mounted() {
    video = await loadVideo();
    net = await posenet.load();
    cvs = document.getElementById('pose');
    ctx = cvs.getContext('2d');
    cvs.height = 500;
    cvs.width = 500;
    draw_frame();
  }
};
</script>

<style scoped>
.video {
  max-width: 500px;
  max-height: 500px;
  border: 1px solid black;
}
.pose {
  width: 500px;
  height: 500px;
  border: 1px solid black;
}
</style>
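
One thing the component above never does is stop the render loop or release the camera when it is torn down. A hedged sketch of the kind of cleanup that could be added (the `rafId` bookkeeping is not in the original code):

// Hypothetical teardown, not part of the original component.
// draw_frame would need to keep the id it gets back, e.g.
//   rafId = requestAnimationFrame(draw_frame);
let rafId = null;

export default {
  async mounted() {
    // ...same setup as above...
  },
  beforeDestroy() {
    if (rafId !== null) cancelAnimationFrame(rafId);                  // stop the render loop
    if (video && video.srcObject) {
      video.srcObject.getTracks().forEach((track) => track.stop());   // release the camera
    }
  }
};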

Note on versions: as of March 10, 2019 this code does not run on tfjs 1.0; it needs the 0.x releases below.

  1. "@tensorflow-models/posenet": "^0.1.1",
  2. "@tensorflow/tfjs": "^0.11.4",
