当前位置:   article > 正文

利用PYQT5结合YOLOX搭建检测系统_yolov8病虫害识别和pyqt5

yolov8病虫害识别和pyqt5

今天给大家分享用pyqt5桌面小组件搭建一个检测系统,暂定为公共场合猫狗检测系统,检测算法为YOLOX。后续会更新YOLOv8+pyqt5教程

系统界面展示: 

该系统可以进行图片检测,实时检测,视频检测

首先创建一个.py文件复制下面代码:

  1. from PIL import Image
  2. import numpy as np
  3. import time
  4. import os
  5. from PyQt5 import QtWidgets, QtCore, QtGui
  6. from PyQt5.QtGui import *
  7. import cv2
  8. import sys
  9. from PyQt5.QtWidgets import *
  10. # from detect_qt5 import main_detect,my_lodelmodel
  11. from demo import main
  12. '''摄像头和视频实时检测界面'''
  13. class Ui_MainWindow(QWidget):
  14. def __init__(self, parent=None):
  15. super(Ui_MainWindow, self).__init__(parent)
  16. # self.face_recong = face.Recognition()
  17. self.timer_camera1 = QtCore.QTimer()
  18. self.timer_camera2 = QtCore.QTimer()
  19. self.timer_camera3 = QtCore.QTimer()
  20. self.timer_camera4 = QtCore.QTimer()
  21. self.cap = cv2.VideoCapture()
  22. self.CAM_NUM = 0
  23. # self.slot_init()
  24. self.__flag_work = 0
  25. self.x = 0
  26. self.count = 0
  27. self.setWindowTitle("公共场合猫狗检测系统")
  28. self.setWindowIcon(QIcon(os.getcwd() + '\\data\\source_image\\Detective.ico'))
  29. self.setFixedSize(1600, 900)
  30. self.yolo=main()
  31. # self.my_model = my_lodelmodel()
  32. self.button_open_camera = QPushButton(self)
  33. self.button_open_camera.setText(u'打开摄像头')
  34. self.button_open_camera.setStyleSheet('''
  35. QPushButton
  36. {text-align : center;
  37. background-color : white;
  38. font: bold;
  39. border-color: gray;
  40. border-width: 2px;
  41. border-radius: 10px;
  42. padding: 6px;
  43. height : 14px;
  44. border-style: outset;
  45. font : 14px;}
  46. QPushButton:pressed
  47. {text-align : center;
  48. background-color : light gray;
  49. font: bold;
  50. border-color: gray;
  51. border-width: 2px;
  52. border-radius: 10px;
  53. padding: 6px;
  54. height : 14px;
  55. border-style: outset;
  56. font : 14px;}
  57. ''')
  58. self.button_open_camera.move(10, 40)
  59. self.button_open_camera.clicked.connect(self.button_open_camera_click)
  60. # self.button_open_camera.clicked.connect(self.button_open_camera_click1)
  61. # btn.clicked.connect(self.openimage)
  62. self.btn1 = QPushButton(self)
  63. self.btn1.setText("检测摄像头")
  64. self.btn1.setStyleSheet('''
  65. QPushButton
  66. {text-align : center;
  67. background-color : white;
  68. font: bold;
  69. border-color: gray;
  70. border-width: 2px;
  71. border-radius: 10px;
  72. padding: 6px;
  73. height : 14px;
  74. border-style: outset;
  75. font : 14px;}
  76. QPushButton:pressed
  77. {text-align : center;
  78. background-color : light gray;
  79. font: bold;
  80. border-color: gray;
  81. border-width: 2px;
  82. border-radius: 10px;
  83. padding: 6px;
  84. height : 14px;
  85. border-style: outset;
  86. font : 14px;}
  87. ''')
  88. self.btn1.move(10, 80)
  89. self.btn1.clicked.connect(self.button_open_camera_click1)
  90. # print("QPushButton构建")
  91. self.open_video = QPushButton(self)
  92. self.open_video.setText("打开视频")
  93. self.open_video.setStyleSheet('''
  94. QPushButton
  95. {text-align : center;
  96. background-color : white;
  97. font: bold;
  98. border-color: gray;
  99. border-width: 2px;
  100. border-radius: 10px;
  101. padding: 6px;
  102. height : 14px;
  103. border-style: outset;
  104. font : 14px;}
  105. QPushButton:pressed
  106. {text-align : center;
  107. background-color : light gray;
  108. font: bold;
  109. border-color: gray;
  110. border-width: 2px;
  111. border-radius: 10px;
  112. padding: 6px;
  113. height : 14px;
  114. border-style: outset;
  115. font : 14px;}
  116. ''')
  117. self.open_video.move(10, 160)
  118. self.open_video.clicked.connect(self.open_video_button)
  119. print("QPushButton构建")
  120. self.btn1 = QPushButton(self)
  121. self.btn1.setText("检测视频文件")
  122. self.btn1.setStyleSheet('''
  123. QPushButton
  124. {text-align : center;
  125. background-color : white;
  126. font: bold;
  127. border-color: gray;
  128. border-width: 2px;
  129. border-radius: 10px;
  130. padding: 6px;
  131. height : 14px;
  132. border-style: outset;
  133. font : 14px;}
  134. QPushButton:pressed
  135. {text-align : center;
  136. background-color : light gray;
  137. font: bold;
  138. border-color: gray;
  139. border-width: 2px;
  140. border-radius: 10px;
  141. padding: 6px;
  142. height : 14px;
  143. border-style: outset;
  144. font : 14px;}
  145. ''')
  146. self.btn1.move(10, 200)
  147. self.btn1.clicked.connect(self.detect_video)
  148. print("QPushButton构建")
  149. # btn1.clicked.connect(self.detect())
  150. # btn1.clicked.connect(self.button1_test)
  151. # btn1.clicked.connect(self.detect())
  152. # btn1.clicked.connect(self.button1_test)
  153. btn2 = QPushButton(self)
  154. btn2.setText("返回上一界面")
  155. btn2.setStyleSheet('''
  156. QPushButton
  157. {text-align : center;
  158. background-color : white;
  159. font: bold;
  160. border-color: gray;
  161. border-width: 2px;
  162. border-radius: 10px;
  163. padding: 6px;
  164. height : 14px;
  165. border-style: outset;
  166. font : 14px;}
  167. QPushButton:pressed
  168. {text-align : center;
  169. background-color : light gray;
  170. font: bold;
  171. border-color: gray;
  172. border-width: 2px;
  173. border-radius: 10px;
  174. padding: 6px;
  175. height : 14px;
  176. border-style: outset;
  177. font : 14px;}
  178. ''')
  179. btn2.move(10, 240)
  180. btn2.clicked.connect(self.back_lastui)
  181. # 信息显示
  182. self.label_show_camera = QLabel(self)
  183. self.label_move = QLabel()
  184. self.label_move.setFixedSize(100, 100)
  185. # self.label_move.setText(" 11 待检测图片")
  186. self.label_show_camera.setFixedSize(700, 500)
  187. self.label_show_camera.setAutoFillBackground(True)
  188. self.label_show_camera.move(110, 80)
  189. self.label_show_camera.setStyleSheet("QLabel{background:#F5F5DC;}"
  190. "QLabel{color:rgb(300,300,300,120);font-size:10px;font-weight:bold;font-family:宋体;}"
  191. )
  192. self.label_show_camera1 = QLabel(self)
  193. self.label_show_camera1.setFixedSize(700, 500)
  194. self.label_show_camera1.setAutoFillBackground(True)
  195. self.label_show_camera1.move(850, 80)
  196. self.label_show_camera1.setStyleSheet("QLabel{background:#F5F5DC;}"
  197. "QLabel{color:rgb(300,300,300,120);font-size:10px;font-weight:bold;font-family:宋体;}"
  198. )
  199. self.timer_camera1.timeout.connect(self.show_camera)
  200. self.timer_camera2.timeout.connect(self.show_camera1)
  201. # self.timer_camera3.timeout.connect(self.show_camera2)
  202. self.timer_camera4.timeout.connect(self.show_camera2)
  203. self.timer_camera4.timeout.connect(self.show_camera3)
  204. self.clicked = False
  205. # self.setWindowTitle(u'摄像头')
  206. self.frame_s = 3
  207. # 设置背景图片
  208. palette1 = QPalette()
  209. palette1.setBrush(self.backgroundRole(), QBrush(QPixmap('R-C.png')))
  210. self.setPalette(palette1)
  211. def back_lastui(self):
  212. self.timer_camera1.stop()
  213. self.cap.release()
  214. self.label_show_camera.clear()
  215. self.timer_camera2.stop()
  216. self.label_show_camera1.clear()
  217. cam_t.close()
  218. ui_p.show()
  219. '''摄像头'''
  220. def button_open_camera_click(self):
  221. if self.timer_camera1.isActive() == False:
  222. flag = self.cap.open(self.CAM_NUM)
  223. if flag == False:
  224. msg = QtWidgets.QMessageBox.warning(self, u"Warning", u"请检测相机与电脑是否连接正确",
  225. buttons=QtWidgets.QMessageBox.Ok,
  226. defaultButton=QtWidgets.QMessageBox.Ok)
  227. else:
  228. self.timer_camera1.start(30)
  229. self.button_open_camera.setText(u'关闭摄像头')
  230. else:
  231. self.timer_camera1.stop()
  232. self.cap.release()
  233. self.label_show_camera.clear()
  234. self.timer_camera2.stop()
  235. self.label_show_camera1.clear()
  236. self.button_open_camera.setText(u'打开摄像头')
  237. def show_camera(self): # 摄像头左边
  238. flag, self.image = self.cap.read()
  239. dir_path = os.getcwd()
  240. camera_source = dir_path + "\\data\\test\\2.jpg"
  241. cv2.imwrite(camera_source, self.image)
  242. width = self.image.shape[1]
  243. height = self.image.shape[0]
  244. # 设置新的图片分辨率框架
  245. width_new = 700
  246. height_new = 500
  247. # 判断图片的长宽比率
  248. if width / height >= width_new / height_new:
  249. show = cv2.resize(self.image, (width_new, int(height * width_new / width)))
  250. else:
  251. show = cv2.resize(self.image, (int(width * height_new / height), height_new))
  252. show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
  253. showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0], 3 * show.shape[1], QtGui.QImage.Format_RGB888)
  254. self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))
  255. def button_open_camera_click1(self):
  256. if self.timer_camera2.isActive() == False:
  257. flag = self.cap.open(self.CAM_NUM)
  258. if flag == False:
  259. msg = QtWidgets.QMessageBox.warning(self, u"Warning", u"请检测相机与电脑是否连接正确",
  260. buttons=QtWidgets.QMessageBox.Ok,
  261. defaultButton=QtWidgets.QMessageBox.Ok)
  262. else:
  263. self.timer_camera2.start(30)
  264. self.button_open_camera.setText(u'关闭摄像头')
  265. else:
  266. self.timer_camera2.stop()
  267. self.cap.release()
  268. self.label_show_camera1.clear()
  269. self.button_open_camera.setText(u'打开摄像头')
  270. def show_camera1(self):
  271. fps = 0.0
  272. t1 = time.time()
  273. flag, self.image = self.cap.read()
  274. self.image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB)
  275. # self.image = Image.fromarray(np.uint8(self.image))
  276. im0, nums, ti = self.yolo.demoimg(self.image)
  277. im0= cv2.cvtColor(im0, cv2.COLOR_BGR2RGB)
  278. width = im0.shape[1]
  279. height = im0.shape[0]
  280. # 设置新的图片分辨率框架
  281. width_new = 640
  282. height_new = 640
  283. # 判断图片的长宽比率
  284. if width / height >= width_new / height_new:
  285. show = cv2.resize(im0, (width_new, int(height * width_new / width)))
  286. else:
  287. show = cv2.resize(im0, (int(width * height_new / height), height_new))
  288. # im0 = cv2.cvtColor(show, cv2.COLOR_RGB2BGR)
  289. if nums>= 1:
  290. fps = (fps + (1. / (time.time() - t1))) / 2
  291. im0 = cv2.putText(im0, "fps= %.2f" % (fps), (0, 40), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), 2)
  292. im0 = cv2.putText(im0, "No pets allowed", (0, 150), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
  293. showImage = QtGui.QImage(im0, im0.shape[1], im0.shape[0], 3 * im0.shape[1], QtGui.QImage.Format_RGB888)
  294. self.label_show_camera1.setPixmap(QtGui.QPixmap.fromImage(showImage))
  295. '''视频检测'''
  296. def open_video_button(self):
  297. if self.timer_camera4.isActive() == False:
  298. imgName, imgType = QFileDialog.getOpenFileName(self, "打开视频", "", "*.mp4;;*.AVI;;*.rmvb;;All Files(*)")
  299. self.cap_video = cv2.VideoCapture(imgName)
  300. flag = self.cap_video.isOpened()
  301. if flag == False:
  302. msg = QtWidgets.QMessageBox.warning(self, u"Warning", u"请检测相机与电脑是否连接正确",
  303. buttons=QtWidgets.QMessageBox.Ok,
  304. defaultButton=QtWidgets.QMessageBox.Ok)
  305. else:
  306. # self.timer_camera3.start(10)
  307. self.show_camera2()
  308. self.open_video.setText(u'关闭视频')
  309. else:
  310. # self.timer_camera3.stop()
  311. self.cap_video.release()
  312. self.label_show_camera.clear()
  313. self.timer_camera4.stop()
  314. self.frame_s = 3
  315. self.label_show_camera1.clear()
  316. self.open_video.setText(u'打开视频')
  317. def detect_video(self):
  318. if self.timer_camera4.isActive() == False:
  319. flag = self.cap_video.isOpened()
  320. if flag == False:
  321. msg = QtWidgets.QMessageBox.warning(self, u"Warning", u"请检测相机与电脑是否连接正确",
  322. buttons=QtWidgets.QMessageBox.Ok,
  323. defaultButton=QtWidgets.QMessageBox.Ok)
  324. else:
  325. self.timer_camera4.start(30)
  326. else:
  327. self.timer_camera4.stop()
  328. self.cap_video.release()
  329. self.label_show_camera1.clear()
  330. def show_camera2(self): # 显示视频的左边
  331. # 抽帧
  332. length = int(self.cap_video.get(cv2.CAP_PROP_FRAME_COUNT)) # 抽帧
  333. print(self.frame_s, length) # 抽帧
  334. flag, self.image1 = self.cap_video.read() # image1是视频的
  335. if flag == True:
  336. width = self.image1.shape[1]
  337. height = self.image1.shape[0]
  338. # 设置新的图片分辨率框架
  339. width_new = 700
  340. height_new = 500
  341. # 判断图片的长宽比率
  342. if width / height >= width_new / height_new:
  343. show = cv2.resize(self.image1, (width_new, int(height * width_new / width)))
  344. else:
  345. show = cv2.resize(self.image1, (int(width * height_new / height), height_new))
  346. show = cv2.cvtColor(show, cv2.COLOR_BGR2RGB)
  347. showImage = QtGui.QImage(show.data, show.shape[1], show.shape[0], 3 * show.shape[1],
  348. QtGui.QImage.Format_RGB888)
  349. self.label_show_camera.setPixmap(QtGui.QPixmap.fromImage(showImage))
  350. else:
  351. self.cap_video.release()
  352. self.label_show_camera.clear()
  353. self.timer_camera4.stop()
  354. self.label_show_camera1.clear()
  355. self.open_video.setText(u'打开视频')
  356. def show_camera3(self):
  357. flag, self.image1 = self.cap_video.read()
  358. self.frame_s += 1
  359. if flag == True:
  360. # if self.frame_s % 3 == 0: #抽帧
  361. # face = self.face_detect.align(self.image)
  362. # if face:
  363. # pass
  364. # dir_path = os.getcwd()
  365. # camera_source = dir_path + "\\data\\test\\video.jpg"
  366. #
  367. # cv2.imwrite(camera_source, self.image1)
  368. # print("im01")
  369. # im0, label = main_detect(self.my_model, camera_source)
  370. im0,nums,ti = self.yolo.demoimg(self.image1)
  371. # print("imo",im0)
  372. # print(label)
  373. # if label == 'debug':
  374. # print("labelkong")
  375. # print("debug")
  376. # im0, label = slef.detect()
  377. # print("debug1")
  378. width = im0.shape[1]
  379. height = im0.shape[0]
  380. # 设置新的图片分辨率框架
  381. width_new = 700
  382. height_new = 500
  383. # 判断图片的长宽比率
  384. if width / height >= width_new / height_new:
  385. show = cv2.resize(im0, (width_new, int(height * width_new / width)))
  386. else:
  387. show = cv2.resize(im0, (int(width * height_new / height), height_new))
  388. im0 = show#cv2.cvtColor(show, cv2.COLOR_RGB2BGR)
  389. # print("debug2")
  390. if nums >= 1:
  391. im0 = cv2.putText(im0, "Warning", (10, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
  392. im0 = cv2.putText(im0, f"nums:{nums}", (10, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
  393. showImage = QtGui.QImage(im0, im0.shape[1], im0.shape[0], 3 * im0.shape[1], QtGui.QImage.Format_RGB888)
  394. self.label_show_camera1.setPixmap(QtGui.QPixmap.fromImage(showImage))
  395. '''单张图片检测'''
  396. class picture(QWidget):
  397. def __init__(self):
  398. super(picture, self).__init__()
  399. self.str_name = '0'
  400. self.yolo = main()
  401. # self.my_model=my_lodelmodel()
  402. self.resize(1600, 900)
  403. self.setWindowIcon(QIcon(os.getcwd() + '\\data\\source_image\\Detective.ico'))
  404. self.setWindowTitle("公共场合猫狗检测系统")
  405. # window_pale = QtGui.QPalette()
  406. # window_pale.setBrush(self.backgroundRole(), QtGui.QBrush(
  407. # QtGui.QPixmap(os.getcwd() + '\\data\\source_image\\backgroud.jpg')))
  408. # self.setPalette(window_pale)
  409. palette2 = QPalette()
  410. palette2.setBrush(self.backgroundRole(), QBrush(QPixmap('4.jpg')))
  411. self.setPalette(palette2)
  412. camera_or_video_save_path = 'data\\test'
  413. if not os.path.exists(camera_or_video_save_path):
  414. os.makedirs(camera_or_video_save_path)
  415. self.label1 = QLabel(self)
  416. self.label1.setText(" 待检测图片")
  417. self.label1.setFixedSize(700, 500)
  418. self.label1.move(110, 80)
  419. self.label1.setStyleSheet("QLabel{background:#7A6969;}"
  420. "QLabel{color:rgb(300,300,300,120);font-size:20px;font-weight:bold;font-family:宋体;}"
  421. )
  422. self.label2 = QLabel(self)
  423. self.label2.setText("检测结果")
  424. self.label2.setFixedSize(700, 500)
  425. self.label2.move(850, 80)
  426. self.label2.setStyleSheet("QLabel{background:#7A6969;}"
  427. "QLabel{color:rgb(300,300,300,120);font-size:20px;font-weight:bold;font-family:宋体;}"
  428. )
  429. self.label3 = QLabel(self)
  430. self.label3.setText("")
  431. self.label3.move(1200, 620)
  432. self.label3.setStyleSheet("font-size:20px;")
  433. self.label3.adjustSize()
  434. btn = QPushButton(self)
  435. btn.setText("打开图片")
  436. btn.setStyleSheet('''
  437. QPushButton
  438. {text-align : center;
  439. background-color : white;
  440. font: bold;
  441. border-color: gray;
  442. border-width: 2px;
  443. border-radius: 10px;
  444. padding: 6px;
  445. height : 14px;
  446. border-style: outset;
  447. font : 14px;}
  448. QPushButton:pressed
  449. {text-align : center;
  450. background-color : light gray;
  451. font: bold;
  452. border-color: gray;
  453. border-width: 2px;
  454. border-radius: 10px;
  455. padding: 6px;
  456. height : 14px;
  457. border-style: outset;
  458. font : 14px;}
  459. ''')
  460. btn.move(10, 30)
  461. btn.clicked.connect(self.openimage)
  462. btn1 = QPushButton(self)
  463. btn1.setText("检测图片")
  464. btn1.setStyleSheet('''
  465. QPushButton
  466. {text-align : center;
  467. background-color : white;
  468. font: bold;
  469. border-color: gray;
  470. border-width: 2px;
  471. border-radius: 10px;
  472. padding: 6px;
  473. height : 14px;
  474. border-style: outset;
  475. font : 14px;}
  476. QPushButton:pressed
  477. {text-align : center;
  478. background-color : light gray;
  479. font: bold;
  480. border-color: gray;
  481. border-width: 2px;
  482. border-radius: 10px;
  483. padding: 6px;
  484. height : 14px;
  485. border-style: outset;
  486. font : 14px;}
  487. ''')
  488. btn1.move(10, 80)
  489. # print("QPushButton构建")
  490. btn1.clicked.connect(self.button1_test)
  491. btn3 = QPushButton(self)
  492. btn3.setText("视频和摄像头检测")
  493. btn3.setStyleSheet('''
  494. QPushButton
  495. {text-align : center;
  496. background-color : white;
  497. font: bold;
  498. border-color: gray;
  499. border-width: 2px;
  500. border-radius: 10px;
  501. padding: 6px;
  502. height : 14px;
  503. border-style: outset;
  504. font : 14px;}
  505. QPushButton:pressed
  506. {text-align : center;
  507. background-color : light gray;
  508. font: bold;
  509. border-color: gray;
  510. border-width: 2px;
  511. border-radius: 10px;
  512. padding: 6px;
  513. height : 14px;
  514. border-style: outset;
  515. font : 14px;}
  516. ''')
  517. btn3.move(10, 160)
  518. btn3.clicked.connect(self.camera_find)
  519. self.imgname1 = '0'
  520. def camera_find(self):
  521. ui_p.close()
  522. cam_t.show()
  523. def openimage(self):
  524. imgName, imgType = QFileDialog.getOpenFileName(self, "打开图片", "D://",
  525. "Image files (*.jpg *.gif *.png *.jpeg)") # "*.jpg;;*.png;;All Files(*)"
  526. if imgName != '':
  527. self.imgname1 = imgName
  528. # print("imgName",imgName,type(imgName))
  529. self.im0 = cv2.imread(imgName)
  530. width = self.im0.shape[1]
  531. height = self.im0.shape[0]
  532. # 设置新的图片分辨率框架
  533. width_new = 700
  534. height_new = 500
  535. # 判断图片的长宽比率
  536. if width / height >= width_new / height_new:
  537. show = cv2.resize(self.im0, (width_new, int(height * width_new / width)))
  538. else:
  539. show = cv2.resize(self.im0, (int(width * height_new / height), height_new))
  540. im0 = cv2.cvtColor(show, cv2.COLOR_RGB2BGR)
  541. showImage = QtGui.QImage(im0, im0.shape[1], im0.shape[0], 3 * im0.shape[1], QtGui.QImage.Format_RGB888)
  542. self.label1.setPixmap(QtGui.QPixmap.fromImage(showImage))
  543. # jpg = QtGui.QPixmap(imgName).scaled(self.label1.width(), self.label1.height())
  544. # self.label1.setPixmap(jpg)
  545. def button1_test(self):
  546. if self.imgname1 != '0':
  547. # QApplication.processEvents()
  548. # image = Image.open(self.imgname1)
  549. image = cv2.imread(self.imgname1)
  550. # K, im0 = self.yolo.detect_image(image)
  551. im0,nums,time=self.yolo.demoimg(image)
  552. print(nums)
  553. # im0 = np.array(im0)
  554. # QApplication.processEvents()
  555. width = im0.shape[1]
  556. height = im0.shape[0]
  557. # 设置新的图片分辨率框架
  558. width_new = 700
  559. height_new = 700
  560. # 判断图片的长宽比率
  561. if width / height >= width_new / height_new:
  562. im0 = cv2.resize(im0, (width_new, int(height * width_new / width)))
  563. else:
  564. im0 = cv2.resize(im0, (int(width * height_new / height), height_new))
  565. im0 = cv2.putText(im0, f"Infertime:{round(time,2)}s", (410, 80), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
  566. # im0 = cv2.cvtColor(show, cv2.COLOR_RGB2BGR)
  567. if nums >= 1:
  568. im0 = cv2.putText(im0, "Warning", (410, 20), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
  569. im0 = cv2.putText(im0, f"nums:{nums}", (410, 50), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 2)
  570. image_name = QtGui.QImage(im0, im0.shape[1], im0.shape[0], 3 * im0.shape[1], QtGui.QImage.Format_RGB888)
  571. # label=label.split(' ')[0] #label 59 0.96 分割字符串 取前一个
  572. self.label2.setPixmap(QtGui.QPixmap.fromImage(image_name))
  573. # jpg = QtGui.QPixmap(image_name).scaled(self.label1.width(), self.label1.height())
  574. # self.label2.setPixmap(jpg)
  575. else:
  576. QMessageBox.information(self, '错误', '请先选择一个图片文件', QMessageBox.Yes, QMessageBox.Yes)
  577. if __name__ == '__main__':
  578. app = QApplication(sys.argv)
  579. splash = QSplashScreen(QPixmap(".\\data\\source_image\\logo.png"))
  580. # 设置画面中的文字的字体
  581. splash.setFont(QFont('Microsoft YaHei UI', 12))
  582. # 显示画面
  583. splash.show()
  584. # 显示信息
  585. splash.showMessage("程序初始化中... 0%", QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom, QtCore.Qt.black)
  586. time.sleep(0.3)
  587. splash.showMessage("正在加载模型配置文件...60%", QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom, QtCore.Qt.black)
  588. cam_t = Ui_MainWindow()
  589. splash.showMessage("正在加载模型配置文件...100%", QtCore.Qt.AlignLeft | QtCore.Qt.AlignBottom, QtCore.Qt.black)
  590. ui_p = picture()
  591. ui_p.show()
  592. splash.close()
  593. sys.exit(app.exec_())

系统名称和背景图片都可以按自己的需求更换。

然后将YOLOX demo.py文件移动至根目录下,并将下面内容复制过去:

  1. import argparse
  2. import os
  3. import time
  4. from loguru import logger
  5. import cv2
  6. import torch
  7. from yolox.data.data_augment import ValTransform
  8. from yolox.data.datasets import COCO_CLASSES
  9. from yolox.exp import get_exp
  10. from yolox.utils import fuse_model, get_model_info, postprocess, vis
  11. IMAGE_EXT = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]
  12. def make_parser():
  13. parser = argparse.ArgumentParser("YOLOX Demo!")
  14. parser.add_argument(
  15. "--demo", default="image", help="demo type, eg. image, video and webcam"
  16. )
  17. parser.add_argument("-expn", "--experiment-name", type=str, default=None)
  18. parser.add_argument("-n", "--name", type=str, default=None, help="model name")
  19. parser.add_argument(
  20. "--path", default="./assets/dog.jpg", help="path to images or video"
  21. )
  22. parser.add_argument("--camid", type=int, default=0, help="webcam demo camera id")
  23. parser.add_argument(
  24. "--save_result",
  25. action="store_true",
  26. help="whether to save the inference result of image/video",
  27. )
  28. # exp file
  29. parser.add_argument(
  30. "-f",
  31. "--exp_file",
  32. default='exps/default/yolox_s.py',
  33. type=str,
  34. help="please input your experiment description file",
  35. )
  36. parser.add_argument("-c", "--ckpt", default='yolox_s.pth', type=str, help="ckpt for eval")
  37. parser.add_argument(
  38. "--device",
  39. default="cpu",
  40. type=str,
  41. help="device to run our model, can either be cpu or gpu",
  42. )
  43. parser.add_argument("--conf", default=0.01, type=float, help="test conf")
  44. parser.add_argument("--nms", default=0.45, type=float, help="test nms threshold")
  45. parser.add_argument("--tsize", default=640, type=int, help="test img size")
  46. parser.add_argument(
  47. "--fp16",
  48. dest="fp16",
  49. default=False,
  50. action="store_true",
  51. help="Adopting mix precision evaluating.",
  52. )
  53. parser.add_argument(
  54. "--legacy",
  55. dest="legacy",
  56. default=False,
  57. action="store_true",
  58. help="To be compatible with older versions",
  59. )
  60. parser.add_argument(
  61. "--fuse",
  62. dest="fuse",
  63. default=False,
  64. action="store_true",
  65. help="Fuse conv and bn for testing.",
  66. )
  67. parser.add_argument(
  68. "--trt",
  69. dest="trt",
  70. default=False,
  71. action="store_true",
  72. help="Using TensorRT model for testing.",
  73. )
  74. return parser
  75. def get_image_list(path):
  76. image_names = []
  77. for maindir, subdir, file_name_list in os.walk(path):
  78. for filename in file_name_list:
  79. apath = os.path.join(maindir, filename)
  80. ext = os.path.splitext(apath)[1]
  81. if ext in IMAGE_EXT:
  82. image_names.append(apath)
  83. return image_names
  84. class Predictor(object):
  85. def __init__(
  86. self,
  87. model,
  88. exp,
  89. cls_names=COCO_CLASSES,
  90. trt_file=None,
  91. decoder=None,
  92. device="cpu",
  93. fp16=False,
  94. legacy=False,
  95. ):
  96. self.model = model
  97. self.cls_names = cls_names
  98. self.decoder = decoder
  99. self.num_classes = exp.num_classes
  100. self.confthre = exp.test_conf
  101. self.nmsthre = exp.nmsthre
  102. self.test_size = exp.test_size
  103. self.device = device
  104. self.fp16 = fp16
  105. self.preproc = ValTransform(legacy=legacy)
  106. if trt_file is not None:
  107. from torch2trt import TRTModule
  108. model_trt = TRTModule()
  109. model_trt.load_state_dict(torch.load(trt_file))
  110. x = torch.ones(1, 3, exp.test_size[0], exp.test_size[1]).cuda()
  111. self.model(x)
  112. self.model = model_trt
  113. def inference(self, img):
  114. # img = cv2.cvtColor(np.array(img), cv2.COLOR_RGB2BGR)
  115. img_info = {"id": 0}
  116. # if isinstance(img, str):
  117. # img_info["file_name"] = os.path.basename(img)
  118. # img = cv2.imread(img)
  119. # else:
  120. img_info["file_name"] = None
  121. img= cv2.cvtColor(img, cv2.COLOR_RGB2BGR)
  122. height, width = img.shape[:2]
  123. img_info["height"] = height
  124. img_info["width"] = width
  125. img_info["raw_img"] = img
  126. ratio = min(self.test_size[0] / img.shape[0], self.test_size[1] / img.shape[1])
  127. img_info["ratio"] = ratio
  128. img, _ = self.preproc(img, None, self.test_size)
  129. img = torch.from_numpy(img).unsqueeze(0)
  130. img = img.float()
  131. if self.device == "gpu":
  132. img = img.cuda()
  133. if self.fp16:
  134. img = img.half() # to FP16
  135. with torch.no_grad():
  136. t0 = time.time()
  137. outputs = self.model(img)
  138. if self.decoder is not None:
  139. outputs = self.decoder(outputs, dtype=outputs.type())
  140. outputs = postprocess(
  141. outputs, self.num_classes, self.confthre,
  142. self.nmsthre, class_agnostic=True
  143. )
  144. logger.info("Infer time: {:.4f}s".format(time.time() - t0))
  145. return outputs, img_info,time.time() - t0
  146. def visual(self, output, img_info, cls_conf=0.35):
  147. ratio = img_info["ratio"]
  148. img = img_info["raw_img"]
  149. if output is None:
  150. return img,0
  151. output = output.cpu()
  152. bboxes = output[:, 0:4]
  153. # preprocessing: resize
  154. bboxes /= ratio
  155. cls = output[:, 6]
  156. scores = output[:, 4] * output[:, 5]
  157. vis_res,k = vis(img, bboxes, scores, cls, cls_conf, self.cls_names)
  158. return vis_res,k
  159. def image_demo(predictor,current_time,image):
  160. # if os.path.isdir(path):
  161. # files = get_image_list(path)
  162. # else:
  163. # files = [path]
  164. # files.sort()
  165. # for image_name in files:
  166. outputs, img_info,ti = predictor.inference(image)
  167. result_image,nums = predictor.visual(outputs[0], img_info, predictor.confthre)
  168. return result_image,nums,ti
  169. # if save_result:
  170. # save_folder = os.path.join(
  171. # vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
  172. # )
  173. # os.makedirs(save_folder, exist_ok=True)
  174. # save_file_name = os.path.join(save_folder, os.path.basename(image_name))
  175. # logger.info("Saving detection result in {}".format(save_file_name))
  176. # cv2.imwrite(save_file_name, result_image)
  177. # ch = cv2.waitKey(0)
  178. # if ch == 27 or ch == ord("q") or ch == ord("Q"):
  179. # break
  180. def imageflow_demo(predictor, vis_folder, current_time, args):
  181. # cap = cv2.VideoCapture(args.path if args.demo == "video" else args.camid)
  182. # width = cap.get(cv2.CAP_PROP_FRAME_WIDTH) # float
  183. # height = cap.get(cv2.CAP_PROP_FRAME_HEIGHT) # float
  184. # fps = cap.get(cv2.CAP_PROP_FPS)
  185. # if args.save_result:
  186. # save_folder = os.path.join(
  187. # vis_folder, time.strftime("%Y_%m_%d_%H_%M_%S", current_time)
  188. # )
  189. # os.makedirs(save_folder, exist_ok=True)
  190. # if args.demo == "video":
  191. # save_path = os.path.join(save_folder, os.path.basename(args.path))
  192. # else:
  193. # save_path = os.path.join(save_folder, "camera.mp4")
  194. # logger.info(f"video save_path is {save_path}")
  195. # vid_writer = cv2.VideoWriter(
  196. # save_path, cv2.VideoWriter_fourcc(*"mp4v"), fps, (int(width), int(height))
  197. # )
  198. while True:
  199. ret_val, frame = cap.read()
  200. if ret_val:
  201. outputs, img_info = predictor.inference(frame)
  202. result_frame = predictor.visual(outputs[0], img_info, predictor.confthre)
  203. if args.save_result:
  204. vid_writer.write(result_frame)
  205. else:
  206. cv2.namedWindow("yolox", cv2.WINDOW_NORMAL)
  207. cv2.imshow("yolox", result_frame)
  208. ch = cv2.waitKey(1)
  209. if ch == 27 or ch == ord("q") or ch == ord("Q"):
  210. break
  211. else:
  212. break
  213. class main(object):
  214. def __init__(self):
  215. args = make_parser().parse_args()
  216. exp = get_exp(args.exp_file, args.name)
  217. if not args.experiment_name:
  218. args.experiment_name = exp.exp_name
  219. file_name = os.path.join(exp.output_dir, args.experiment_name)
  220. os.makedirs(file_name, exist_ok=True)
  221. # vis_folder = None
  222. # if args.save_result:
  223. # vis_folder = os.path.join(file_name, "vis_res")
  224. # os.makedirs(vis_folder, exist_ok=True)
  225. if args.trt:
  226. args.device = "gpu"
  227. logger.info("Args: {}".format(args))
  228. if args.conf is not None:
  229. exp.test_conf = args.conf
  230. if args.nms is not None:
  231. exp.nmsthre = args.nms
  232. if args.tsize is not None:
  233. exp.test_size = (args.tsize, args.tsize)
  234. model = exp.get_model()
  235. logger.info("Model Summary: {}".format(get_model_info(model, exp.test_size)))
  236. if args.device == "gpu":
  237. model.cuda()
  238. if args.fp16:
  239. model.half() # to FP16
  240. model.eval()
  241. if not args.trt:
  242. if args.ckpt is None:
  243. ckpt_file = os.path.join(file_name, "best_ckpt.pth")
  244. else:
  245. ckpt_file = args.ckpt
  246. logger.info("loading checkpoint")
  247. ckpt = torch.load(ckpt_file, map_location="cpu")
  248. # load the model state dict
  249. model.load_state_dict(ckpt["model"])
  250. logger.info("loaded checkpoint done.")
  251. if args.fuse:
  252. logger.info("\tFusing model...")
  253. model = fuse_model(model)
  254. if args.trt:
  255. assert not args.fuse, "TensorRT model is not support model fusing!"
  256. trt_file = os.path.join(file_name, "model_trt.pth")
  257. assert os.path.exists(
  258. trt_file
  259. ), "TensorRT model is not found!\n Run python3 tools/trt.py first!"
  260. model.head.decode_in_inference = False
  261. decoder = model.head.decode_outputs
  262. logger.info("Using TensorRT to inference")
  263. else:
  264. trt_file = None
  265. decoder = None
  266. self.predictor = Predictor(
  267. model, exp, COCO_CLASSES, trt_file, decoder,
  268. args.device, args.fp16, args.legacy,
  269. )
  270. def demoimg(self,img):
  271. current_time = time.localtime()
  272. im=image_demo(self.predictor,current_time,img)
  273. return im
  274. def demovido(self,img):
  275. imageflow_demo(predictor, img, current_time)
  276. if __name__ == "__main__":
  277. args = make_parser().parse_args()
  278. exp = get_exp(args.exp_file, args.name)
  279. main(exp, args)

需要的参数直接在上面改好。由于这里我没有单独训练猫狗数据集,而是直接使用了 YOLOX-s 的预训练权重文件,因此需要把类别索引限定为猫和狗对应的分类索引(COCO 中分别为 15 和 16),即修改 visualize.py 文件:

  1. def vis(img, boxes, scores, cls_ids, conf=0.5, class_names=None):
  2. k = 0
  3. for i in range(len(boxes)):
  4. box = boxes[i]
  5. cls_id = int(cls_ids[i])
  6. if cls_id in (15,16):
  7. score = scores[i]
  8. if score < conf:
  9. continue
  10. k+=1
  11. x0 = int(box[0])
  12. y0 = int(box[1])
  13. x1 = int(box[2])
  14. y1 = int(box[3])
  15. color = (_COLORS[cls_id] * 255).astype(np.uint8).tolist()
  16. text = '{}:{:.1f}%'.format(class_names[cls_id], score * 100)
  17. txt_color = (0, 0, 0) if np.mean(_COLORS[cls_id]) > 0.5 else (255, 255, 255)
  18. font = cv2.FONT_HERSHEY_SIMPLEX
  19. txt_size = cv2.getTextSize(text, font, 0.4, 1)[0]
  20. cv2.rectangle(img, (x0, y0), (x1, y1), color, 2)
  21. txt_bk_color = (_COLORS[cls_id] * 255 * 0.7).astype(np.uint8).tolist()
  22. cv2.rectangle(
  23. img,
  24. (x0, y0 + 1),
  25. (x0 + txt_size[0] + 1, y0 + int(1.5*txt_size[1])),
  26. txt_bk_color,
  27. -1
  28. )
  29. cv2.putText(img, text, (x0, y0 + txt_size[1]), font, 0.4, txt_color, thickness=1)
  30. return img,k

因为希望知道检测到的目标数量,所以这里增加了计数变量 k,并随标注后的图像一起 return。

最后运行一下之前的pyqt5文件就可以进行检测了:

示例如下:

 

 

本文内容由网友自发贡献,转载请注明出处:【wpsshop博客】
推荐阅读
相关标签
  

闽ICP备14008679号