
Computer Vision Project: Gesture-Based Computer Volume Control with OpenCV


Environment Setup

Development environment: PyCharm

Main packages: opencv-python and mediapipe, plus pycaw and comtypes for controlling the Windows system volume.
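
All four packages can be installed with pip (pip install opencv-python mediapipe pycaw comtypes). Below is a minimal sanity-check sketch (not part of the original project) that confirms the imports resolve and that pycaw can reach the default playback device on Windows; the version attributes are standard, though the numbers printed will depend on your installation:

# Quick environment check (sketch only): verifies the core packages import
# and that pycaw can locate the default playback device (Windows only).
import cv2
import mediapipe as mp
from pycaw.pycaw import AudioUtilities

print("OpenCV:", cv2.__version__)
print("MediaPipe:", mp.__version__)
print("Default speakers found:", AudioUtilities.GetSpeakers() is not None)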

Source Code

Volume Control Module

import cv2
import HandTrackingModule as htm
import numpy as np
import time
import math
from ctypes import cast, POINTER
from comtypes import CLSCTX_ALL
from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume

# Get the default speakers through pycaw and expose the volume endpoint
devices = AudioUtilities.GetSpeakers()
interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
volume = cast(interface, POINTER(IAudioEndpointVolume))
volumeRange = volume.GetVolumeRange()  # e.g. (-63.5, 0.0, 0.03125)
minVol = volumeRange[0]
maxVol = volumeRange[1]

#############################
wCam, hCam = 640, 480
#############################

cap = cv2.VideoCapture(0)  # 0 for the laptop's built-in camera; use 1 (or another index) for an external camera
cap.set(3, wCam)  # frame width
cap.set(4, hCam)  # frame height
pTime = 0
detector = htm.handDetector()

while True:
    success, img = cap.read()
    img = detector.findHands(img)
    lmList = detector.findPosition(img, draw=False)
    if len(lmList) != 0:
        # print(lmList[4], lmList[8])
        x1, y1, x2, y2 = lmList[4][1], lmList[4][2], lmList[8][1], lmList[8][2]  # thumb tip and index fingertip
        xc, yc = (x2 + x1) // 2, (y2 + y1) // 2
        cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
        cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
        cv2.circle(img, (xc, yc), 15, (255, 0, 255), cv2.FILLED)
        cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
        length = math.hypot(x2 - x1, y2 - y1)  # roughly 15--200 pixels in practice
        if length < 25:
            cv2.circle(img, (xc, yc), 15, (0, 255, 0), cv2.FILLED)
        # print(length)  # 25--230
        # Map the finger distance to the system volume range
        vol = np.interp(length, [15, 200], [minVol, maxVol])
        volume.SetMasterVolumeLevel(vol, None)
        volBar = np.interp(length, [15, 200], [350, 150])
        volPer = np.interp(length, [15, 200], [0, 100])
        cv2.rectangle(img, (20, 150), (50, 350), (255, 0, 255), 2)
        cv2.rectangle(img, (20, int(volBar)), (50, 350), (255, 0, 255), cv2.FILLED)
        cv2.putText(img, f'{int(volPer)}%', (10, 380), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)
    cTime = time.time()
    fps = 1 / (cTime - pTime)
    pTime = cTime
    cv2.putText(img, f'fps: {int(fps)}', (10, 40), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
    cv2.imshow("Image", img)
    cv2.waitKey(1)
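
The core of this script is the mapping step: np.interp linearly rescales the thumb-to-index distance (observed to span roughly 15-200 pixels) onto the decibel range returned by GetVolumeRange(), and the same distance is reused for the on-screen bar and percentage. Because SetMasterVolumeLevel works in decibels, the perceived loudness does not change linearly with finger distance. A common alternative (a sketch that assumes the same pycaw volume object, not part of the original code) is to drive the 0.0-1.0 scalar interface instead:

# Alternative mapping (sketch): drive the 0.0-1.0 scalar volume rather than raw dB.
# Assumes `length`, `np`, and the pycaw `volume` endpoint from the script above.
volScalar = np.interp(length, [15, 200], [0.0, 1.0])
volume.SetMasterVolumeLevelScalar(float(volScalar), None)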

Hand Tracking Module
 

import cv2
import mediapipe as mp
import time
import math


class handDetector():
    def __init__(self, mode=False, maxHands=2, model_complexity=1, detectionCon=0.8, trackCon=0.8):
        self.mode = mode
        self.maxHands = maxHands
        self.detectionCon = detectionCon
        self.trackCon = trackCon
        self.model_complexity = model_complexity
        self.mpHands = mp.solutions.hands
        self.hands = self.mpHands.Hands(self.mode, self.maxHands, self.model_complexity,
                                        self.detectionCon, self.trackCon)
        self.mpDraw = mp.solutions.drawing_utils
        self.tipIds = [4, 8, 12, 16, 20]  # landmark ids of the five fingertips

    def findHands(self, img, draw=True):
        imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        self.results = self.hands.process(imgRGB)
        print(self.results.multi_handedness)  # print the left/right-hand label(s) from the detection result
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                if draw:
                    self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
        return img

    def findPosition(self, img, draw=True):
        self.lmList = []
        if self.results.multi_hand_landmarks:
            for handLms in self.results.multi_hand_landmarks:
                for id, lm in enumerate(handLms.landmark):
                    h, w, c = img.shape
                    cx, cy = int(lm.x * w), int(lm.y * h)  # normalized landmark -> pixel coordinates
                    # print(id, cx, cy)
                    self.lmList.append([id, cx, cy])
                    if draw:
                        cv2.circle(img, (cx, cy), 12, (255, 0, 255), cv2.FILLED)
        return self.lmList

    def fingersUp(self):
        fingers = []
        # Thumb: compare the x-coordinates of the tip and the joint next to it
        if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
            fingers.append(1)
        else:
            fingers.append(0)
        # Other four fingers: a finger counts as up if its tip is above its middle joint
        for id in range(1, 5):
            if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
                fingers.append(1)
            else:
                fingers.append(0)
        # totalFingers = fingers.count(1)
        return fingers

    def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
        x1, y1 = self.lmList[p1][1:]
        x2, y2 = self.lmList[p2][1:]
        cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
        if draw:
            cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
            cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
            cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
        length = math.hypot(x2 - x1, y2 - y1)
        return length, img, [x1, y1, x2, y2, cx, cy]


def main():
    pTime = 0
    cTime = 0
    cap = cv2.VideoCapture(0)
    detector = handDetector()
    while True:
        success, img = cap.read()
        img = detector.findHands(img)  # detect the hand and draw its skeleton
        lmList = detector.findPosition(img)  # list of landmark coordinates [id, x, y]
        if len(lmList) != 0:
            print(lmList[4])
        cTime = time.time()
        fps = 1 / (cTime - pTime)
        pTime = cTime
        cv2.putText(img, 'fps:' + str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
        cv2.imshow('Image', img)
        cv2.waitKey(1)


if __name__ == "__main__":
    main()
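
Note that the volume-control script recomputes the thumb/index distance and midpoint by hand, even though this module already exposes findDistance for exactly that. As a rough sketch (reusing the variable names from the volume script, not a change to the original code), the detection block in its main loop could be shortened to:

# Sketch: reuse findDistance(4, 8) from HandTrackingModule in the volume loop.
lmList = detector.findPosition(img, draw=False)
if len(lmList) != 0:
    # thumb tip (id 4) to index fingertip (id 8); also draws the line and circles
    length, img, info = detector.findDistance(4, 8, img)
    vol = np.interp(length, [15, 200], [minVol, maxVol])
    volume.SetMasterVolumeLevel(vol, None)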

Final Demo

 
