赞
踩
开发环境:PyCharm
音量控制模块:通过摄像头检测手势(拇指与食指间距)来调节系统音量
- import cv2
- import HandTrackingModule as htm
- import numpy as np
- import time
- import math
- from ctypes import cast, POINTER
- from comtypes import CLSCTX_ALL
- from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
- devices = AudioUtilities.GetSpeakers()
- interface = devices.Activate(IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
- volume = cast(interface, POINTER(IAudioEndpointVolume))
- volumeRange = volume.GetVolumeRange() # (-63.5, 0.0, 0.03125)
- minVol = volumeRange[0]
- maxVol = volumeRange[1]
-
- #############################
- wCam, hCam = 640, 480
- #############################
- cap = cv2.VideoCapture(0) # 若使用笔记本自带摄像头则编号为0 若使用外接摄像头 则更改为1或其他编号
- cap.set(3, wCam)
- cap.set(4, hCam)
- pTime = 0
- detector = htm.handDetector()
-
- while True:
- success, img = cap.read()
- img = detector.findHands(img)
- lmList = detector.findPosition(img, draw=False)
- if len(lmList) != 0:
- # print(lmList[4], lmList[8])
- x1, y1, x2, y2 = lmList[4][1], lmList[4][2], lmList[8][1], lmList[8][2]
- xc, yc = (x2 + x1) // 2, (y2 + y1) // 2
- cv2.circle(img, (x1, y1), 15, (255, 0, 255), cv2.FILLED)
- cv2.circle(img, (x2, y2), 15, (255, 0, 255), cv2.FILLED)
- cv2.circle(img, (xc, yc), 15, (255, 0, 255), cv2.FILLED)
- cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 3)
-
- length = math.hypot(x2 - x1, y2 -y1) # 15--200
- if length < 25:
- cv2.circle(img, (xc, yc), 15, (0, 255, 0), cv2.FILLED)
- # print(length) # 25--230
- # 下面实现长度到音量的转换
- vol = np.interp(length, [15, 200], [minVol, maxVol])
- volume.SetMasterVolumeLevel(vol, None)
- volBar = np.interp(length, [15, 200], [350, 150])
- volPer = np.interp(length, [15, 200], [0, 100])
-
- cv2.rectangle(img, (20, 150), (50, 350), (255, 0, 255), 2)
- cv2.rectangle(img, (20, int(volBar)), (50, 350), (255, 0, 255), cv2.FILLED)
- cv2.putText(img, f'{int(volPer)}%', (10, 380), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 255), 2)
-
- cTime = time.time()
- fps = 1 / (cTime - pTime)
- pTime = cTime
- cv2.putText(img, f'fps: {int(fps)}', (10, 40), cv2.FONT_HERSHEY_PLAIN, 2, (255, 0, 0), 2)
-
- cv2.imshow("Image", img)
- cv2.waitKey(1)
- import cv2
- import mediapipe as mp
- import time
- import math
-
- class handDetector():
- def __init__(self, mode=False, maxHands=2, model_complexity=1,detectionCon=0.8, trackCon=0.8):
- self.mode = mode
- self.maxHands = maxHands
- self.detectionCon = detectionCon
- self.trackCon = trackCon
- self.model_complexity = model_complexity
- self.mpHands = mp.solutions.hands
- self.hands = self.mpHands.Hands(self.mode, self.maxHands,self.model_complexity, self.detectionCon, self.trackCon)
- self.mpDraw = mp.solutions.drawing_utils
- self.tipIds = [4, 8, 12, 16, 20]
-
- def findHands(self, img, draw=True):
- imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
- self.results = self.hands.process(imgRGB)
-
- print(self.results.multi_handedness) # 获取检测结果中的左右手标签并打印
-
- if self.results.multi_hand_landmarks:
- for handLms in self.results.multi_hand_landmarks:
- if draw:
- self.mpDraw.draw_landmarks(img, handLms, self.mpHands.HAND_CONNECTIONS)
- return img
-
- def findPosition(self, img, draw=True):
- self.lmList = []
- if self.results.multi_hand_landmarks:
- for handLms in self.results.multi_hand_landmarks:
- for id, lm in enumerate(handLms.landmark):
- h, w, c = img.shape
- cx, cy = int(lm.x * w), int(lm.y * h)
- # print(id, cx, cy)
- self.lmList.append([id, cx, cy])
- if draw:
- cv2.circle(img, (cx, cy), 12, (255, 0, 255), cv2.FILLED)
- return self.lmList
-
- def fingersUp(self):
- fingers = []
- # 大拇指
- if self.lmList[self.tipIds[0]][1] > self.lmList[self.tipIds[0] - 1][1]:
- fingers.append(1)
- else:
- fingers.append(0)
-
- # 其余手指
- for id in range(1, 5):
- if self.lmList[self.tipIds[id]][2] < self.lmList[self.tipIds[id] - 2][2]:
- fingers.append(1)
- else:
- fingers.append(0)
-
- # totalFingers = fingers.count(1)
- return fingers
-
- def findDistance(self, p1, p2, img, draw=True, r=15, t=3):
- x1, y1 = self.lmList[p1][1:]
- x2, y2 = self.lmList[p2][1:]
- cx, cy = (x1 + x2) // 2, (y1 + y2) // 2
-
- if draw:
- cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), t)
- cv2.circle(img, (x1, y1), r, (255, 0, 255), cv2.FILLED)
- cv2.circle(img, (x2, y2), r, (255, 0, 255), cv2.FILLED)
- cv2.circle(img, (cx, cy), r, (0, 0, 255), cv2.FILLED)
- length = math.hypot(x2 - x1, y2 - y1)
-
- return length, img, [x1, y1, x2, y2, cx, cy]
-
-
- def main():
- pTime = 0
- cTime = 0
- cap = cv2.VideoCapture(0)
- detector = handDetector()
- while True:
- success, img = cap.read()
- img = detector.findHands(img) # 检测手势并画上骨架信息
-
- lmList = detector.findPosition(img) # 获取得到坐标点的列表
- if len(lmList) != 0:
- print(lmList[4])
-
- cTime = time.time()
- fps = 1 / (cTime - pTime)
- pTime = cTime
-
- cv2.putText(img, 'fps:' + str(int(fps)), (10, 70), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 255), 3)
- cv2.imshow('Image', img)
- cv2.waitKey(1)
-
-
- if __name__ == "__main__":
- main()
最终演示效果
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。