当前位置:   article > 正文

计算机视觉 | OpenCV 实现手势虚拟控制亮度和音量

计算机视觉 | OpenCV 实现手势虚拟控制亮度和音量

Hi,大家好,我是半亩花海。在当今科技飞速发展的时代,我们身边充斥着各种智能设备,然而,如何更便捷地与这些设备进行交互却是一个不断被探索的课题。本文将主要介绍一个基于 OpenCV 的手势识别项目,通过手势来控制电脑屏幕亮度和音量大小,为用户提供了一种全新的交互方式。


目录

一、代码拆解

1. 导入必要库

2. 初始化手部关键点

3. 数据格式转换

4. 画手势关键点

5. 手势状态缓冲处理

6. 画直线

7. 屏幕亮度和音量控制

8. 初始化摄像头和手部关键点识别器

9. Pygame 界面初始化和事件监听

二、实战演示

1. 亮度——light

2. 音量——voice

3. 菜单——menu

三、完整代码


一、代码拆解

1. 导入必要库

在开始介绍项目的实现细节之前,我们首先需要导入项目所需的必要库。这些库包括:

  • OpenCV:用于处理图像和视频数据。
  • Mediapipe:提供了对手部关键点的识别和跟踪功能。
  • Pygame:用于创建图形界面和显示摄像头捕获的图像。
  • WMI:用于调节电脑屏幕亮度。
  • pycaw:用于控制电脑的音量。
  1. # 导入必要库
  2. import math
  3. import sys
  4. import numpy as np
  5. import cv2
  6. import pygame
  7. import wmi
  8. import mediapipe as mp
  9. from ctypes import cast, POINTER
  10. from comtypes import CLSCTX_ALL
  11. from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
  12. import warnings # 忽略警告
  13. warnings.filterwarnings("ignore")

2. 初始化手部关键点

首先创建一个 HandKeyPoint 类,用于初始化手部关键点检测器,并提供对图像进行处理的方法。

  1. # 手部关键点类
  2. class HandKeyPoint:
  3. def __init__(self,
  4. static_image_mode=False,
  5. max_num_hands=2,
  6. model_complexity=1,
  7. min_detection_confidence=0.5,
  8. min_tracking_confidence=0.5):
  9. # 手部识别api
  10. self.mp_hands = mp.solutions.hands
  11. # 获取手部识别类
  12. self.hands = self.mp_hands.Hands(static_image_mode=static_image_mode,
  13. max_num_hands=max_num_hands,
  14. model_complexity=model_complexity,
  15. min_detection_confidence=min_detection_confidence,
  16. min_tracking_confidence=min_tracking_confidence)
  17. def process(self, image):
  18. # 将BGR转换为RGB
  19. img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
  20. # 识别图像中的手势,并返回结果
  21. results = self.hands.process(img)
  22. # numpy格式的数据
  23. np_arr = landmarks_to_numpy(results)
  24. return results, np_arr

3. 数据格式转换

将手部关键点的检测结果(landmarks 格式的数据)转换为 numpy 数组,以便后续的处理和分析。

  1. # 将landmarks格式的数据转换为numpy格式的数据
  2. def landmarks_to_numpy(results):
  3. """
  4. 将landmarks格式的数据转换为numpy格式的数据
  5. numpy shape:(2, 21, 3)
  6. :param results:
  7. :return:
  8. """
  9. shape = (2, 21, 3)
  10. landmarks = results.multi_hand_landmarks
  11. if landmarks is None:
  12. # 没有检测到手
  13. return np.zeros(shape)
  14. elif len(landmarks) == 1:
  15. # 检测出一只手,先判断是左手还是右手
  16. label = results.multi_handedness[0].classification[0].label
  17. hand = landmarks[0]
  18. # print(label)
  19. if label == "Left":
  20. return np.array(
  21. [np.array([[hand.landmark[i].x, hand.landmark[i].y, hand.landmark[i].z] for i in range(21)]),
  22. np.zeros((21, 3))])
  23. else:
  24. return np.array([np.zeros((21, 3)),
  25. np.array(
  26. [[hand.landmark[i].x, hand.landmark[i].y, hand.landmark[i].z] for i in range(21)])])
  27. elif len(landmarks) == 2:
  28. # print(results.multi_handedness)
  29. lh_idx = 0
  30. rh_idx = 0
  31. for idx, hand_type in enumerate(results.multi_handedness):
  32. label = hand_type.classification[0].label
  33. if label == 'Left':
  34. lh_idx = idx
  35. if label == 'Right':
  36. rh_idx = idx
  37. lh = np.array(
  38. [[landmarks[lh_idx].landmark[i].x, landmarks[lh_idx].landmark[i].y, landmarks[lh_idx].landmark[i].z] for i
  39. in range(21)])
  40. rh = np.array(
  41. [[landmarks[rh_idx].landmark[i].x, landmarks[rh_idx].landmark[i].y, landmarks[rh_idx].landmark[i].z] for i
  42. in range(21)])
  43. return np.array([lh, rh])
  44. else:
  45. return np.zeros((2, 21, 3))

4. 画手势关键点

  1. # 画手势关键点
  2. def draw_landmark(img, results):
  3. if results.multi_hand_landmarks:
  4. for hand_landmark in results.multi_hand_landmarks:
  5. mp.solutions.drawing_utils.draw_landmarks(img,
  6. hand_landmark,
  7. mp.solutions.hands.HAND_CONNECTIONS,
  8. mp.solutions.drawing_styles.get_default_hand_landmarks_style(),
  9. mp.solutions.drawing_styles.get_default_hand_connections_style())
  10. return img

5. 手势状态缓冲处理

为了平滑处理手势状态的变化,我们实现了一个 Buffer 类,用于缓存手势状态的变化,并提供了添加正例和负例的方法。

  1. # 缓冲区类
  2. class Buffer:
  3. def __init__(self, volume=20):
  4. self.__positive = 0
  5. self.state = False
  6. self.__negative = 0
  7. self.__volume = volume
  8. self.__count = 0
  9. def add_positive(self):
  10. self.__count += 1
  11. if self.__positive >= self.__volume:
  12. # 如果正例个数大于容量,将状态定为True
  13. self.state = True
  14. self.__negative = 0
  15. self.__count = 0
  16. else:
  17. self.__positive += 1
  18. if self.__count > self.__volume:
  19. # 如果大于容量次操作后还没有确定状态
  20. self.__positive = 0
  21. self.__count = 0
  22. def add_negative(self):
  23. self.__count += 1
  24. if self.__negative >= self.__volume:
  25. # 如果负例个数大于容量,将状态定为False
  26. self.state = False
  27. self.__positive = 0
  28. else:
  29. self.__negative += 1
  30. if self.__count > self.__volume:
  31. # 如果大于容量次操作后还没有确定状态
  32. self.__positive = 0
  33. self.__count = 0
  34. # print(f"pos:{self.__positive} neg:{self.__negative} count:{self.__count}")
  35. def clear(self):
  36. self.__positive = 0
  37. self.state = False
  38. self.__negative = 0
  39. self.__count = 0

6. 画直线

  1. # 画线函数
  2. def draw_line(frame, p1, p2, color=(255, 127, 0), thickness=3):
  3. """
  4. 画一条直线
  5. :param p1:
  6. :param p2:
  7. :return:
  8. """
  9. return cv2.line(frame, (int(p1[0] * CAM_W), int(p1[1] * CAM_H)), (int(p2[0] * CAM_W), int(p2[1] * CAM_H)), color,
  10. thickness)

7. 屏幕亮度和音量控制

  1. # 控制屏幕亮度
  2. def screen_change(percent): # percent/2即为亮度百分比
  3. SCREEN = wmi.WMI(namespace='root/WMI')
  4. a = SCREEN.WmiMonitorBrightnessMethods()[0]
  5. a.WmiSetBrightness(Brightness=percent, Timeout=500)
  6. # 初始化音量控制
  7. def init_voice():
  8. devices = AudioUtilities.GetSpeakers()
  9. interface = devices.Activate(
  10. IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
  11. volume = cast(interface, POINTER(IAudioEndpointVolume))
  12. volume.SetMute(0, None)
  13. volume_range = volume.GetVolumeRange()
  14. min_volume = volume_range[0]
  15. max_volume = volume_range[1]
  16. return (min_volume, max_volume), volume

8. 初始化摄像头和手部关键点识别器

在项目的初始化阶段,我们需要加载摄像头实例和手部关键点识别实例,以便后续对手势进行识别和处理。

# Open the default camera (device index 0)
cap = cv2.VideoCapture(0)
# Camera frame size and aspect ratio used for coordinate mapping
CAM_W = 640
CAM_H = 480
CAM_SCALE = CAM_W / CAM_H
# Hand keypoint detector instance
hand = HandKeyPoint()

9. Pygame 界面初始化和事件监听

为了展示手势控制效果,并提供交互界面,我们使用了 Pygame 库。在初始化阶段,我们创建了一个窗口,并设置了标题。同时,我们实现了事件监听功能,以便在需要时退出程序。

具体来说,我们使用 Pygame 创建了一个窗口,并将摄像头捕获的图像显示在窗口中。同时,我们利用 Pygame 的事件监听功能,监听用户的键盘事件,例如按下"q"键时退出程序。这样,用户就可以通过手势控制屏幕亮度和音量大小,同时在 Pygame 窗口中观察手势识别效果。

  1. # 初始化pygame
  2. pygame.init()
  3. # 设置窗口全屏
  4. screen = pygame.display.set_mode((800, 600))
  5. pygame.display.set_caption("virtual_control_screen")
  6. # 获取当前窗口大小
  7. window_size = list(screen.get_size())
  8. # 主循环
  9. while True:
  10. ······
  11. # 事件监听 若按q则退出程序
  12. for event in pygame.event.get():
  13. if event.type == pygame.KEYDOWN:
  14. if event.key == pygame.K_q:
  15. sys.exit(0)

二、实战演示

1. 亮度——light

如果 20 < angle < 90,那么“light ready”即手势控制亮度

2. 音量——voice

如果 -20 > angle > -50,那么“voice ready”即手势控制音量

3. 菜单——menu

上述两种情况除外,那么处于“menu”状态即进入菜单

通过演示可以发现,食指与大拇指在屏幕中的距离越远,亮度越高(音量越大),反之越小,实现了通过手势对亮度和音量的控制。


三、完整代码

  1. #!/usr/bin/env python
  2. # -*- coding:utf-8 -*-
  3. """
  4. @Project : virtual
  5. @File : virtual_control.py
  6. @IDE : PyCharm
  7. @Author : 半亩花海
  8. @Date : 2024:02:06 18:01
  9. """
  10. # 导入模块
  11. import math
  12. import sys
  13. import numpy as np
  14. import cv2
  15. import pygame
  16. import wmi
  17. import mediapipe as mp
  18. from ctypes import cast, POINTER
  19. from comtypes import CLSCTX_ALL
  20. from pycaw.pycaw import AudioUtilities, IAudioEndpointVolume
  21. import warnings # 忽略警告
  22. warnings.filterwarnings("ignore")
  23. # 手部关键点类
  24. class HandKeyPoint:
  25. def __init__(self,
  26. static_image_mode=False,
  27. max_num_hands=2,
  28. model_complexity=1,
  29. min_detection_confidence=0.5,
  30. min_tracking_confidence=0.5):
  31. # 手部识别api
  32. self.mp_hands = mp.solutions.hands
  33. # 获取手部识别类
  34. self.hands = self.mp_hands.Hands(static_image_mode=static_image_mode,
  35. max_num_hands=max_num_hands,
  36. model_complexity=model_complexity,
  37. min_detection_confidence=min_detection_confidence,
  38. min_tracking_confidence=min_tracking_confidence)
  39. def process(self, image):
  40. # 将BGR转换为RGB
  41. img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
  42. # 识别图像中的手势,并返回结果
  43. results = self.hands.process(img)
  44. # numpy格式的数据
  45. np_arr = landmarks_to_numpy(results)
  46. return results, np_arr
  47. # 将landmarks格式的数据转换为numpy格式的数据
  48. def landmarks_to_numpy(results):
  49. """
  50. 将landmarks格式的数据转换为numpy格式的数据
  51. numpy shape:(2, 21, 3)
  52. :param results:
  53. :return:
  54. """
  55. shape = (2, 21, 3)
  56. landmarks = results.multi_hand_landmarks
  57. if landmarks is None:
  58. # 没有检测到手
  59. return np.zeros(shape)
  60. elif len(landmarks) == 1:
  61. # 检测出一只手,先判断是左手还是右手
  62. label = results.multi_handedness[0].classification[0].label
  63. hand = landmarks[0]
  64. # print(label)
  65. if label == "Left":
  66. return np.array(
  67. [np.array([[hand.landmark[i].x, hand.landmark[i].y, hand.landmark[i].z] for i in range(21)]),
  68. np.zeros((21, 3))])
  69. else:
  70. return np.array([np.zeros((21, 3)),
  71. np.array(
  72. [[hand.landmark[i].x, hand.landmark[i].y, hand.landmark[i].z] for i in range(21)])])
  73. elif len(landmarks) == 2:
  74. # print(results.multi_handedness)
  75. lh_idx = 0
  76. rh_idx = 0
  77. for idx, hand_type in enumerate(results.multi_handedness):
  78. label = hand_type.classification[0].label
  79. if label == 'Left':
  80. lh_idx = idx
  81. if label == 'Right':
  82. rh_idx = idx
  83. lh = np.array(
  84. [[landmarks[lh_idx].landmark[i].x, landmarks[lh_idx].landmark[i].y, landmarks[lh_idx].landmark[i].z] for i
  85. in range(21)])
  86. rh = np.array(
  87. [[landmarks[rh_idx].landmark[i].x, landmarks[rh_idx].landmark[i].y, landmarks[rh_idx].landmark[i].z] for i
  88. in range(21)])
  89. return np.array([lh, rh])
  90. else:
  91. return np.zeros((2, 21, 3))
  92. # 画手势关键点
  93. def draw_landmark(img, results):
  94. if results.multi_hand_landmarks:
  95. for hand_landmark in results.multi_hand_landmarks:
  96. mp.solutions.drawing_utils.draw_landmarks(img,
  97. hand_landmark,
  98. mp.solutions.hands.HAND_CONNECTIONS,
  99. mp.solutions.drawing_styles.get_default_hand_landmarks_style(),
  100. mp.solutions.drawing_styles.get_default_hand_connections_style())
  101. return img
  102. # 缓冲区类
  103. class Buffer:
  104. def __init__(self, volume=20):
  105. self.__positive = 0
  106. self.state = False
  107. self.__negative = 0
  108. self.__volume = volume
  109. self.__count = 0
  110. def add_positive(self):
  111. self.__count += 1
  112. if self.__positive >= self.__volume:
  113. # 如果正例个数大于容量,将状态定为True
  114. self.state = True
  115. self.__negative = 0
  116. self.__count = 0
  117. else:
  118. self.__positive += 1
  119. if self.__count > self.__volume:
  120. # 如果大于容量次操作后还没有确定状态
  121. self.__positive = 0
  122. self.__count = 0
  123. def add_negative(self):
  124. self.__count += 1
  125. if self.__negative >= self.__volume:
  126. # 如果负例个数大于容量,将状态定为False
  127. self.state = False
  128. self.__positive = 0
  129. else:
  130. self.__negative += 1
  131. if self.__count > self.__volume:
  132. # 如果大于容量次操作后还没有确定状态
  133. self.__positive = 0
  134. self.__count = 0
  135. # print(f"pos:{self.__positive} neg:{self.__negative} count:{self.__count}")
  136. def clear(self):
  137. self.__positive = 0
  138. self.state = False
  139. self.__negative = 0
  140. self.__count = 0
  141. # 画线函数
  142. def draw_line(frame, p1, p2, color=(255, 127, 0), thickness=3):
  143. """
  144. 画一条直线
  145. :param p1:
  146. :param p2:
  147. :return:
  148. """
  149. return cv2.line(frame, (int(p1[0] * CAM_W), int(p1[1] * CAM_H)), (int(p2[0] * CAM_W), int(p2[1] * CAM_H)), color,
  150. thickness)
  151. # 控制屏幕亮度
  152. def screen_change(percent): # percent/2即为亮度百分比
  153. SCREEN = wmi.WMI(namespace='root/WMI')
  154. a = SCREEN.WmiMonitorBrightnessMethods()[0]
  155. a.WmiSetBrightness(Brightness=percent, Timeout=500)
  156. # 初始化音量控制
  157. def init_voice():
  158. devices = AudioUtilities.GetSpeakers()
  159. interface = devices.Activate(
  160. IAudioEndpointVolume._iid_, CLSCTX_ALL, None)
  161. volume = cast(interface, POINTER(IAudioEndpointVolume))
  162. volume.SetMute(0, None)
  163. volume_range = volume.GetVolumeRange()
  164. min_volume = volume_range[0]
  165. max_volume = volume_range[1]
  166. return (min_volume, max_volume), volume
# ---- Application setup ----
# Open the default camera (device index 0)
cap = cv2.VideoCapture(0)
# Camera frame size and aspect ratio used for coordinate mapping
CAM_W = 640
CAM_H = 480
CAM_SCALE = CAM_W / CAM_H
# Hand keypoint detector instance
hand = HandKeyPoint()
# Initialize pygame
pygame.init()
# Create the display window (800x600)
screen = pygame.display.set_mode((800, 600))
pygame.display.set_caption("virtual_control_screen")
# Current window size
window_size = list(screen.get_size())
# Debounce buffers for the two gesture modes (brightness / volume)
buffer_light = Buffer(10)
buffer_voice = Buffer(10)
last_y = 0
last_2_y = 1
last_2_x = 0
# Initialize system volume control
voice_range, volume = init_voice()
# Brightness bar layout
bright_bar_length = 300
bright_bar_height = 20
bright_bar_x = 50
bright_bar_y = 100
# Volume bar layout
vol_bar_length = 300
vol_bar_height = 20
vol_bar_x = 50
vol_bar_y = 50
# ---- Main loop: each iteration processes one camera frame ----
while True:
    img_menu = None
    lh_index = -1
    # Read a frame from the camera
    success, frame = cap.read()
    # Convert OpenCV's BGR frame to RGB
    frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
    # Mirror the image horizontally
    frame = cv2.flip(frame, 1)
    # Run hand detection
    res, arr = hand.process(frame)
    frame = draw_landmark(frame, res)
    # Distance between left-hand landmarks 7 and 8 — used to scale the menu icon
    scale = math.hypot((arr[0, 7, 0] - arr[0, 8, 0]),
                       (arr[0, 7, 1] - arr[0, 8, 1]),
                       (arr[0, 7, 2] - arr[0, 8, 2]))
    # Slope of the left hand (wrist, point 0, to middle fingertip, point 12)
    tan = (arr[0, 0, 1] - arr[0, 12, 1]) / (arr[0, 0, 0] - arr[0, 12, 0])
    # Hand tilt angle in degrees
    angle = np.arctan(tan) * 180 / np.pi
    # print(angle)
    if 20 < angle < 90:
        path = 'resources/menu/light.png'
        buffer_light.add_positive()
        buffer_voice.add_negative()
        # Show the brightness bar and its readout
        show_brightness = True
        show_volume = False
    elif -20 > angle > -50:
        path = 'resources/menu/voice.png'
        buffer_voice.add_positive()
        buffer_light.add_negative()
        # Show the volume bar and its readout
        show_brightness = False
        show_volume = True
    else:
        path = 'resources/menu/menu.png'
        buffer_light.add_negative()
        buffer_voice.add_negative()
        # Neutral menu state: show neither bar
        show_brightness = False
        show_volume = False
    # Pixel distance between right-hand thumb tip (4) and index fingertip (8)
    dis = math.hypot(int((arr[1, 4, 0] - arr[1, 8, 0]) * CAM_W), int((arr[1, 4, 1] - arr[1, 8, 1]) * CAM_H))
    # Right-hand reference scale (knuckle distance), compensates for hand depth
    s = math.hypot((arr[1, 5, 0] - arr[1, 9, 0]), (arr[1, 5, 1] - arr[1, 9, 1]), (arr[1, 5, 2] - arr[1, 9, 2]))
    # Brightness mode
    if buffer_light.state:
        frame = cv2.putText(frame, 'light ready', (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 127, 0))
        frame = draw_line(frame, arr[1, 4], arr[1, 8], thickness=5, color=(255, 188, 66))
        if dis != 0:
            # Linear interpolation: map the finger distance onto 0-100 brightness
            light = np.interp(dis, [int(500 * s), int(3000 * s)], (0, 100))
            # Apply the brightness
            screen_change(light)
    # Volume mode
    elif buffer_voice.state:
        frame = cv2.putText(frame, 'voice ready', (10, 35), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 127, 0))
        frame = draw_line(frame, arr[1, 4], arr[1, 8], thickness=5, color=(132, 134, 248))
        if dis != 0:
            # Map the finger distance onto the endpoint's volume range
            vol = np.interp(dis, [int(500 * s), int(3000 * s)], voice_range)
            # Apply the volume
            volume.SetMasterVolumeLevel(vol, None)
    # Resize the frame to the window height, preserving the camera aspect ratio
    frame = cv2.resize(frame, (int(window_size[1] * CAM_SCALE), window_size[1]))
    frame = cv2.transpose(frame)
    # Render the frame
    frame = pygame.surfarray.make_surface(frame)
    screen.blit(frame, (int(0.5 * (CAM_W - CAM_H * CAM_SCALE)), 0))
    # Draw the menu icon near the left-hand palm center (points 9, 13, 0)
    img_menu = pygame.image.load(path).convert_alpha()
    img_w, img_h = img_menu.get_size()
    img_menu = pygame.transform.scale(img_menu, (int(img_w * scale * 5), int(img_h * scale * 5)))
    x = (arr[0][9][0] + arr[0][13][0] + arr[0][0][0]) / 3
    y = (arr[0][9][1] + arr[0][13][1] + arr[0][0][1]) / 3
    x = int(x * window_size[0] - window_size[0] * scale * 3.5)
    y = int(y * window_size[1] - window_size[1] * scale * 12)
    # print(x, y)
    screen.blit(img_menu, (x, y))
    # Outline of the active bar
    if show_volume:
        pygame.draw.rect(screen, (255, 255, 255), (vol_bar_x, vol_bar_y, vol_bar_length, vol_bar_height), 3)
    elif show_brightness:
        pygame.draw.rect(screen, (255, 255, 255), (bright_bar_x, bright_bar_y, bright_bar_length, bright_bar_height),
                         3)
    # Fill of the active bar plus a percentage label
    if show_volume:
        vol = volume.GetMasterVolumeLevel()
        vol_range = voice_range[1] - voice_range[0]
        vol_bar_fill_length = int((vol - voice_range[0]) / vol_range * vol_bar_length)
        pygame.draw.rect(screen, (0, 255, 0), (vol_bar_x, vol_bar_y, vol_bar_fill_length, vol_bar_height))
        # Current volume as a percentage of the endpoint's range
        vol_text = f"Volume: {int((vol - voice_range[0]) / vol_range * 100)}%"
        vol_text_surface = pygame.font.SysFont(None, 24).render(vol_text, True, (255, 255, 255))
        screen.blit(vol_text_surface, (vol_bar_x + vol_bar_length + 10, vol_bar_y))
    elif show_brightness:
        # Query the current brightness back from WMI for display
        brightness = wmi.WMI(namespace='root/WMI').WmiMonitorBrightness()[0].CurrentBrightness
        bright_bar_fill_length = int(brightness / 100 * bright_bar_length)
        pygame.draw.rect(screen, (255, 255, 0), (bright_bar_x, bright_bar_y, bright_bar_fill_length, bright_bar_height))
        # Current brightness percentage
        bright_text = f"Brightness: {brightness}%"
        bright_text_surface = pygame.font.SysFont(None, 24).render(bright_text, True, (255, 255, 255))
        screen.blit(bright_text_surface, (bright_bar_x + bright_bar_length + 10, bright_bar_y))
    pygame.display.flip()
    # Event handling: quit the program when 'q' is pressed
    for event in pygame.event.get():
        if event.type == pygame.KEYDOWN:
            if event.key == pygame.K_q:
                sys.exit(0)
声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/繁依Fanyi0/article/detail/76074
推荐阅读
相关标签
  

闽ICP备14008679号