
Deploying YOLOv5 with Qt



Preface

This article builds a YOLOv5 detection platform with PyQt5 and runs inference on an exported ONNX model through OpenCV's DNN module.

Reference: https://github.com/Transformer-man/yolov5_onnx_dnn.git
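A quick sketch of what "detection with OpenCV DNN" means in practice: the ONNX model exported from YOLOv5 is loaded with cv2.dnn.readNetFromONNX and run with a plain forward pass. The file names below (yolov5s.onnx, test.jpg) are placeholders, and this sketch simply resizes the image to 640x640, whereas the project code further down letterboxes the input and decodes/NMS-filters the raw output.

import cv2

# Load the exported YOLOv5 ONNX model with OpenCV's DNN module (no PyTorch needed at runtime)
net = cv2.dnn.readNetFromONNX('yolov5s.onnx')   # placeholder path to your exported model

img = cv2.imread('test.jpg')                    # placeholder test image (BGR)
# YOLOv5 expects a 640x640 RGB input scaled to [0, 1]
blob = cv2.dnn.blobFromImage(img, 1 / 255.0, (640, 640), swapRB=True, crop=False)
net.setInput(blob)
out = net.forward(net.getUnconnectedOutLayersNames())[0]
print(out.shape)   # typically (1, 25200, 85) for the 80-class COCO model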

I. Preparation

1. Download the code

Baidu Netdisk: https://pan.baidu.com/s/1tYc3qviXKCENLyKc_R0pUg?pwd=yyds (extraction code: yyds)

2. Environment setup

2.1 Install PyQt5

pip install PyQt5

or, using the Tsinghua mirror:

pip install PyQt5 -i https://pypi.tuna.tsinghua.edu.cn/simple  

2.2 Install OpenCV

pip install opencv-python    
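As a quick, optional sanity check (not part of the project code), both packages should import and report their versions without errors:

import cv2
from PyQt5.QtCore import QT_VERSION_STR

print(cv2.__version__)   # cv2.dnn needs a reasonably recent OpenCV 4.x build to read YOLOv5 ONNX files
print(QT_VERSION_STR)    # Qt version bundled with PyQt5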

                        

II. Demo

Both real-time webcam detection and MP4 video detection are supported.

Qt interface (screenshot)

III. Full code

After opening a video with 打开视频 (Open Video), clicking 开始识别 (Start Detection) copies it to input_image/text.mp4, runs detection, saves the result to output_image/text.mp4, and plays it back.

Qt code (tt.py):

import sys
import os
import shutil
from PyQt5.QtGui import QImage, QPixmap
from yolov5_dnn import yolov5
from yolov5_dnn import mult_test
import subprocess
from PyQt5.QtWidgets import QApplication, QWidget, QFileDialog, QPushButton, QVBoxLayout, QHBoxLayout, QLabel, QSlider
from PyQt5.QtMultimedia import QMediaPlayer, QMediaContent
from PyQt5.QtMultimediaWidgets import QVideoWidget
from PyQt5.QtCore import Qt, QUrl, QTimer
import cv2


class RealtimeDetection(QWidget):
    def __init__(self):
        super().__init__()
        # Media player and video display widget
        self.player = QMediaPlayer(self)
        self.video_widget = QVideoWidget(self)
        self.player.setVideoOutput(self.video_widget)
        # Label used to show webcam frames during real-time detection
        self.label = QLabel(self)
        self.label.hide()
        # self.label.setAlignment(Qt.AlignCenter)
        # self.label.setAlignment(Qt.AlignVCenter)
        self.label.setGeometry(100, 1, 400, 500)
        # self.label.setFixedSize(400, 300)
        self.setFixedSize(640, 480)
        # Control buttons and progress slider
        self.open_button = QPushButton("打开视频", self)
        self.play_button = QPushButton("播放", self)
        self.pause_button = QPushButton("暂停", self)
        self.progress_bar = QSlider(Qt.Horizontal, self)
        self.progress_bar.setRange(0, 0)
        self.progress_bar.sliderMoved.connect(self.set_position)
        self.detect_button = QPushButton("开始识别", self)
        self.realtime_detect_button = QPushButton("实时识别", self)
        self.stop_realtime_detection = QPushButton("停止实时识别", self)
        # Layout
        button_layout = QHBoxLayout()
        button_layout.addWidget(self.open_button)
        button_layout.addWidget(self.play_button)
        button_layout.addWidget(self.pause_button)
        button_layout.addWidget(self.detect_button)
        button_layout.addWidget(self.realtime_detect_button)
        button_layout.addWidget(self.stop_realtime_detection)
        # button_layout.addWidget(self.label, stretch=1)  # optionally put the label in the layout instead
        layout = QVBoxLayout()
        layout.addLayout(button_layout)
        layout.addWidget(self.video_widget)
        layout.addWidget(self.progress_bar)
        self.setLayout(layout)
        # Signal/slot connections
        self.open_button.clicked.connect(self.open_video)
        self.play_button.clicked.connect(self.play_video)
        self.pause_button.clicked.connect(self.player.pause)
        self.player.durationChanged.connect(self.progress_bar.setMaximum)
        self.player.positionChanged.connect(self.progress_bar.setValue)
        self.realtime_detect_button.clicked.connect(self.start_realtime_detection)
        self.detect_button.clicked.connect(self.start_detection)
        self.stop_realtime_detection.clicked.connect(self.stop_recognition)
        # Path of the currently selected video file
        self.video_path = ""
        # The webcam capture is created lazily in realtime_detection()
        # self.cap = cv2.VideoCapture(0)  # 0 = default camera

    def open_video(self):
        # Pick a video file and start playback
        file_path = QFileDialog.getOpenFileName(self, "选择视频文件")[0]
        if file_path:
            media = QMediaContent(QUrl.fromLocalFile(file_path))
            self.player.setMedia(media)
            self.player.play()
            # Remember the selected file for later detection
            self.video_path = file_path

    def set_position(self, position):
        self.player.setPosition(position)

    def play_video(self):
        # If the player is stopped, load the detected video and play it
        if self.player.state() == QMediaPlayer.StoppedState:
            video_path = "/Volumes/Hard_disk/yolov5_onnx_dnn-master-3/output_image/text.mp4"
            self.player.setMedia(QMediaContent(QUrl.fromLocalFile(video_path)))
            self.player.play()

    def start_detection(self):
        # Create the input_image folder for the input video
        input_folder = "input_image"
        os.makedirs(input_folder, exist_ok=True)
        # Copy the selected video into input_image/text.mp4
        input_video_path = os.path.join(input_folder, 'text.mp4')
        shutil.copyfile(self.video_path, input_video_path)
        onnx_path = r'./yolov5s.onnx'
        input_path = r'./input_image'
        save_path = r'./output_image'
        mult_test(onnx_path, input_path, save_path, video=False)
        # Play the detected video
        video_path = "/Volumes/Hard_disk/yolov5_onnx_dnn-master-3/output_image/text.mp4"
        self.player.setMedia(QMediaContent(QUrl.fromLocalFile(video_path)))
        self.player.play()

    def realtime_detection(self):
        # Called on every QTimer tick: grab one frame, run detection, show it on the label
        self.cap = cv2.VideoCapture(0)  # 0 = default camera
        onnx_path = r'./yolov5s.onnx'
        model = yolov5(onnx_path)
        frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        fps = self.cap.get(cv2.CAP_PROP_FPS)  # average frame rate of the stream
        size = (frame_width, frame_height)   # VideoWriter expects (width, height)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('zi.mp4', fourcc, fps, size)
        ret, frame = self.cap.read()
        if not ret:
            print("无法读取")  # failed to read a frame
        else:
            frame = model.detect(frame)
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            image = QImage(frame.data, frame_width, frame_height, QImage.Format_RGB888)
            scaled_image = image.scaled(self.label.size(), Qt.KeepAspectRatio)
            self.label.setPixmap(QPixmap.fromImage(scaled_image))
            self.label.show()  # show the preview label

    def start_realtime_detection(self):
        self.timer = QTimer(self)
        self.timer.timeout.connect(self.realtime_detection)
        self.timer.start(30)

    def stop_recognition(self):
        self.label.clear()
        self.label.hide()
        self.timer.stop()
        self.cap.release()


if __name__ == '__main__':
    app = QApplication(sys.argv)
    player = RealtimeDetection()
    player.show()
    sys.exit(app.exec_())
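With the dependencies installed and yolov5s.onnx sitting next to the script, the GUI is started like any other Python program (assuming the file above is saved as tt.py, the name used later in this article):

python tt.py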

Image-processing code (yolov5_dnn.py):

Reference: https://github.com/Transformer-man/yolov5_onnx_dnn.git

import cv2
import numpy as np
import time
import os
from numpy import array


class Colors:
    # Ultralytics color palette https://ultralytics.com/
    def __init__(self):
        # hex = matplotlib.colors.TABLEAU_COLORS.values()
        hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB',
               '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7')
        self.palette = [self.hex2rgb('#' + c) for c in hex]
        self.n = len(self.palette)

    def __call__(self, i, bgr=False):
        c = self.palette[int(i) % self.n]
        return (c[2], c[1], c[0]) if bgr else c

    @staticmethod
    def hex2rgb(h):  # rgb order (PIL)
        return tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))


colors = Colors()


class yolov5():
    def __init__(self, onnx_path, confThreshold=0.25, nmsThreshold=0.45):
        self.classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat',
                        'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
                        'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
                        'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                        'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
                        'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                        'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
                        'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
                        'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator',
                        'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
        self.colors = [np.random.randint(0, 255, size=3).tolist() for _ in range(len(self.classes))]
        num_classes = len(self.classes)
        self.anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
        self.nl = len(self.anchors)
        self.na = len(self.anchors[0]) // 2
        self.no = num_classes + 5
        self.stride = np.array([8., 16., 32.])
        self.inpWidth = 640
        self.inpHeight = 640
        self.net = cv2.dnn.readNetFromONNX(onnx_path)
        self.confThreshold = confThreshold
        self.nmsThreshold = nmsThreshold

    def _make_grid(self, nx=20, ny=20):
        xv, yv = np.meshgrid(np.arange(ny), np.arange(nx))
        return np.stack((xv, yv), 2).reshape((-1, 2)).astype(np.float32)

    def letterbox(self, im, new_shape=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True, stride=32):
        # Resize and pad image while meeting stride-multiple constraints
        shape = im.shape[:2]  # current shape [height, width]
        if isinstance(new_shape, int):
            new_shape = (new_shape, new_shape)
        # Scale ratio (new / old)
        r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])
        if not scaleup:  # only scale down, do not scale up (for better val mAP)
            r = min(r, 1.0)
        # Compute padding
        ratio = r, r  # width, height ratios
        new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
        dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding
        if auto:  # minimum rectangle
            dw, dh = np.mod(dw, stride), np.mod(dh, stride)  # wh padding
        elif scaleFill:  # stretch
            dw, dh = 0.0, 0.0
            new_unpad = (new_shape[1], new_shape[0])
            ratio = new_shape[1] / shape[1], new_shape[0] / shape[0]  # width, height ratios
        dw /= 2  # divide padding into 2 sides
        dh /= 2
        if shape[::-1] != new_unpad:  # resize
            im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
        top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
        left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
        im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
        return im, ratio, (dw, dh)

    def box_area(self, boxes: array):
        return (boxes[:, 2] - boxes[:, 0]) * (boxes[:, 3] - boxes[:, 1])

    def box_iou(self, box1: array, box2: array):
        """
        :param box1: [N, 4]
        :param box2: [M, 4]
        :return: [N, M]
        """
        area1 = self.box_area(box1)  # N
        area2 = self.box_area(box2)  # M
        # Broadcasting: trailing dimensions must match or be 1
        lt = np.maximum(box1[:, np.newaxis, :2], box2[:, :2])
        rb = np.minimum(box1[:, np.newaxis, 2:], box2[:, 2:])
        wh = rb - lt
        wh = np.maximum(0, wh)  # [N, M, 2]
        inter = wh[:, :, 0] * wh[:, :, 1]
        iou = inter / (area1[:, np.newaxis] + area2 - inter)
        return iou  # NxM

    def numpy_nms(self, boxes: array, scores: array, iou_threshold: float):
        idxs = scores.argsort()  # indices sorted by score, ascending [N]
        keep = []
        while idxs.size > 0:
            max_score_index = idxs[-1]
            max_score_box = boxes[max_score_index][None, :]
            keep.append(max_score_index)
            if idxs.size == 1:
                break
            idxs = idxs[:-1]  # drop the best box; compare the remaining boxes against it
            other_boxes = boxes[idxs]  # [?, 4]
            ious = self.box_iou(max_score_box, other_boxes)  # 1xM: IoU of the best box vs the rest
            idxs = idxs[ious[0] <= iou_threshold]
        keep = np.array(keep)
        return keep

    def xywh2xyxy(self, x):
        # Convert nx4 boxes from [x, y, w, h] to [x1, y1, x2, y2] where xy1=top-left, xy2=bottom-right
        y = np.copy(x)
        y[:, 0] = x[:, 0] - x[:, 2] / 2  # top left x
        y[:, 1] = x[:, 1] - x[:, 3] / 2  # top left y
        y[:, 2] = x[:, 0] + x[:, 2] / 2  # bottom right x
        y[:, 3] = x[:, 1] + x[:, 3] / 2  # bottom right y
        return y

    def non_max_suppression(self, prediction, conf_thres=0.25, agnostic=False):  # 25200 = 20*20*3 + 40*40*3 + 80*80*3
        # prediction has shape (1, 25200, 5 + num_classes): x, y, w, h, objectness, per-class scores
        xc = prediction[..., 4] > conf_thres  # candidates above the objectness threshold
        # Settings
        min_wh, max_wh = 2, 4096  # (pixels) minimum and maximum box width and height
        max_nms = 30000  # maximum number of boxes fed into NMS
        output = [np.zeros((0, 6))] * prediction.shape[0]
        for xi, x in enumerate(prediction):  # image index, image inference
            # Keep only predictions whose objectness exceeds conf_thres
            x = x[xc[xi]]
            if not x.shape[0]:
                continue
            # Compute conf
            x[:, 5:] *= x[:, 4:5]  # conf = obj_conf * cls_conf
            # Box (center x, center y, width, height) to (x1, y1, x2, y2)
            box = self.xywh2xyxy(x[:, :4])
            # Detections matrix nx6 (xyxy, conf, cls)
            conf = np.max(x[:, 5:], axis=1)  # best class confidence
            j = np.argmax(x[:, 5:], axis=1)  # best class index
            re = np.array(conf.reshape(-1) > conf_thres)
            conf = conf.reshape(-1, 1)
            j = j.reshape(-1, 1)
            x = np.concatenate((box, conf, j), axis=1)[re]
            # Check shape
            n = x.shape[0]  # number of boxes
            if not n:  # no boxes
                continue
            elif n > max_nms:  # excess boxes: sort by confidence (descending) and truncate
                x = x[x[:, 4].argsort()[::-1][:max_nms]]
            # Batched NMS: offset boxes by class so NMS runs per class
            c = x[:, 5:6] * (0 if agnostic else max_wh)
            boxes, scores = x[:, :4] + c, x[:, 4]  # boxes (offset by class), scores
            i = self.numpy_nms(boxes, scores, self.nmsThreshold)
            output[xi] = x[i]
        return output

    def detect(self, srcimg):
        im = srcimg.copy()
        im, ratio, wh = self.letterbox(srcimg, self.inpWidth, stride=self.stride, auto=False)
        # Set the input of the network
        blob = cv2.dnn.blobFromImage(im, 1 / 255.0, swapRB=True, crop=False)
        self.net.setInput(blob)
        outs = self.net.forward(self.net.getUnconnectedOutLayersNames())[0]
        # NMS
        pred = self.non_max_suppression(outs, self.confThreshold, agnostic=False)
        # Draw boxes, mapping coordinates back from the letterboxed image to the original
        for i in pred[0]:
            left = int((i[0] - wh[0]) / ratio[0])
            top = int((i[1] - wh[1]) / ratio[1])
            width = int((i[2] - wh[0]) / ratio[0])
            height = int((i[3] - wh[1]) / ratio[1])
            conf = i[4]
            classId = i[5]
            cv2.rectangle(srcimg, (int(left), int(top)), (int(width), int(height)), colors(classId, True), 2, lineType=cv2.LINE_AA)
            label = '%.2f' % conf
            label = '%s:%s' % (self.classes[int(classId)], label)
            # Display the label at the top of the bounding box
            labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
            top = max(top, labelSize[1])
            cv2.putText(srcimg, label, (int(left - 20), int(top - 10)), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 255), thickness=1, lineType=cv2.LINE_AA)
        return srcimg


def mult_test(onnx_path, img_dir, save_root_path, video=False):
    model = yolov5(onnx_path)
    if video:
        cap = cv2.VideoCapture(0)
        frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
        frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
        fps = cap.get(cv2.CAP_PROP_FPS)  # average frame rate of the source
        size = (frame_width, frame_height)  # VideoWriter expects (width, height)
        fourcc = cv2.VideoWriter_fourcc(*'XVID')
        out = cv2.VideoWriter('zi.mp4', fourcc, fps, size)
        while cap.isOpened():
            ok, frame = cap.read()
            if not ok:
                break
            frame = model.detect(frame)
            out.write(frame)
            cv2.imshow('result', frame)
            c = cv2.waitKey(1) & 0xFF
            if c == 27 or c == ord('q'):
                break
        cap.release()
        out.release()
        cv2.destroyAllWindows()
    else:
        if not os.path.exists(save_root_path):
            os.mkdir(save_root_path)
        for root, dirs, files in os.walk(img_dir):
            for file in files:
                image_path = os.path.join(root, file)
                save_path = os.path.join(save_root_path, file)
                if "mp4" in file or 'avi' in file:
                    cap = cv2.VideoCapture(image_path)
                    frame_height = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
                    frame_width = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
                    fps = cap.get(cv2.CAP_PROP_FPS)
                    size = (frame_width, frame_height)
                    fourcc = cv2.VideoWriter_fourcc(*'XVID')
                    out = cv2.VideoWriter(save_path, fourcc, fps, size)
                    while cap.isOpened():
                        ok, frame = cap.read()
                        if not ok:
                            break
                        frame = model.detect(frame)
                        out.write(frame)
                    cap.release()
                    out.release()
                    print(" finish: ", file)
                elif 'jpg' in file or 'png' in file:  # fixed: the original `'jpg' or 'png' in file` was always truthy
                    srcimg = cv2.imread(image_path)
                    srcimg = model.detect(srcimg)
                    print(" finish: ", file)
                    cv2.imwrite(save_path, srcimg)
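The yolov5 class and mult_test can also be used on their own, without the Qt GUI. A minimal sketch, assuming the placeholder paths below exist on your machine:

import os
import cv2
from yolov5_dnn import yolov5, mult_test

# Single image: build the detector once, then call detect() per image/frame
model = yolov5('./yolov5s.onnx', confThreshold=0.25, nmsThreshold=0.45)
img = cv2.imread('./input_image/demo.jpg')        # placeholder image path
result = model.detect(img)                        # boxes and labels are drawn onto the image
os.makedirs('./output_image', exist_ok=True)
cv2.imwrite('./output_image/demo.jpg', result)

# Whole folder (images plus mp4/avi videos), the same call the GUI's 开始识别 button makes
mult_test('./yolov5s.onnx', './input_image', './output_image', video=False)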

Using your own model:

Convert your trained .pt model to ONNX and place the exported model in this project's directory:

python export.py --weights yolov5s.pt --include onnx
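Here export.py is the script from the official YOLOv5 repository. An optional way to confirm the exported file is usable by OpenCV's DNN module:

import cv2

# If this raises an error, the ONNX export is not compatible with your OpenCV build
net = cv2.dnn.readNetFromONNX('yolov5s.onnx')
print(net.getUnconnectedOutLayersNames())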

Then change line 30 of yolov5_dnn.py (the self.classes list) to the class names of your own model (an example for a custom model follows the list below):

self.classes = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat',
                'traffic light', 'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat',
                'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe', 'backpack',
                'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
                'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
                'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair',
                'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse',
                'remote', 'keyboard', 'cell phone', 'microwave', 'oven', 'toaster', 'sink', 'refrigerator',
                'book', 'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
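For example, for a hypothetical two-class helmet-detection model the list would shrink to just those labels, in the same order they were used during training:

# Hypothetical custom model: class names must match the training order
self.classes = ['helmet', 'no_helmet']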

Then replace the hard-coded paths in tt.py with paths on your own machine:

video_path is where the detected video is saved (around line 100 of tt.py):

def play_video(self):
    # If the player is stopped, load the detected video and play it
    if self.player.state() == QMediaPlayer.StoppedState:
        video_path = "/Volumes/Hard_disk/yolov5_onnx_dnn-master-3/output_image/text.mp4"
        self.player.setMedia(QMediaContent(QUrl.fromLocalFile(video_path)))
        self.player.play()
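If you prefer not to hard-code an absolute path, a path resolved relative to the script's own directory works on any machine (a small alternative sketch, not in the original code):

import os

# output_image/text.mp4 relative to the folder containing tt.py
video_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'output_image', 'text.mp4')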

onnx_path is the model path (around line 104 of tt.py):

def start_detection(self):
    # Create the input_image folder for the input video
    input_folder = "input_image"
    os.makedirs(input_folder, exist_ok=True)
    # Copy the selected video into input_image/text.mp4
    input_video_path = os.path.join(input_folder, 'text.mp4')
    shutil.copyfile(self.video_path, input_video_path)
    onnx_path = r'./yolov5s.onnx'
    input_path = r'./input_image'
    save_path = r'./output_image'
    mult_test(onnx_path, input_path, save_path, video=False)
    video_path = "/Volumes/Hard_disk/yolov5_onnx_dnn-master-3/output_image/text.mp4"
    self.player.setMedia(QMediaContent(QUrl.fromLocalFile(video_path)))
    self.player.play()

The same onnx_path is used in realtime_detection (around line 122 of tt.py):

def realtime_detection(self):
    self.cap = cv2.VideoCapture(0)  # 0 = default camera
    onnx_path = r'./yolov5s.onnx'
    model = yolov5(onnx_path)
    frame_height = int(self.cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    frame_width = int(self.cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    fps = self.cap.get(cv2.CAP_PROP_FPS)  # average frame rate of the stream
    size = (frame_width, frame_height)   # VideoWriter expects (width, height)
    fourcc = cv2.VideoWriter_fourcc(*'XVID')
    out = cv2.VideoWriter('zi.mp4', fourcc, fps, size)
    ret, frame = self.cap.read()
    if not ret:
        print("无法读取")  # failed to read a frame
    else:
        frame = model.detect(frame)
        frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
        image = QImage(frame.data, frame_width, frame_height, QImage.Format_RGB888)
        scaled_image = image.scaled(self.label.size(), Qt.KeepAspectRatio)
        self.label.setPixmap(QPixmap.fromImage(scaled_image))
        self.label.show()  # show the preview label

After making the changes above, the program should work normally.
