
AI Project 6: Deploying YOLOv5 on CPU with OpenVINO

This is an original article; if you repost it, please credit the original source.

I. Testing the CPU Demo

1. Create a new virtual environment
conda create -n course_torch_openvino python=3.8
2. Activate the environment
conda activate course_torch_openvino
3. Install the CPU version of PyTorch
pip install torch torchvision torchaudio  -i https://pypi.tuna.tsinghua.edu.cn/simple
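
To confirm the installation, a quick check (on a CPU-only build the second line should print False):

import torch
print(torch.__version__)
print(torch.cuda.is_available())  # expected: False for the CPU build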

4. Install the YOLOv5 dependencies

This article uses YOLOv5 v5.0, downloaded from GitHub. Install its requirements:

pip install -r requirements.txt -i https://pypi.tuna.tsinghua.edu.cn/simple
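
Optionally, sanity-check the repository itself before moving on. A quick smoke test, assuming the stock yolov5s.pt weights (v5.0's detect.py fetches them automatically on first run):

python detect.py --source data/images --weights yolov5s.pt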

5. Run the demo
python demo.py

Full code:

import cv2
import numpy as np
import torch
import time

# Load the model from a local clone of the yolov5 repo
# model = torch.hub.load('./yolov5', 'custom', path='./weights/ppe_yolo_n.pt', source='local')  # local repo
model = torch.hub.load('./yolov5', 'custom', 'weights/poker_n.pt', source='local')
model.conf = 0.4  # confidence threshold

cap = cv2.VideoCapture(0)
fps_time = time.time()

while True:
    ret, frame = cap.read()
    frame = cv2.flip(frame, 1)
    img_cvt = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)  # OpenCV delivers BGR; the model expects RGB

    # Inference
    results = model(img_cvt)
    result_np = results.pandas().xyxy[0].to_numpy()
    for box in result_np:
        l, t, r, b = box[:4].astype('int')
        cv2.rectangle(frame, (l, t), (r, b), (0, 255, 0), 5)
        cv2.putText(frame, str(box[-1]), (l, t - 20), cv2.FONT_ITALIC, 1, (0, 255, 0), 2)

    # FPS counter
    now = time.time()
    fps_text = 1 / (now - fps_time)
    fps_time = now
    cv2.putText(frame, str(round(fps_text, 2)), (50, 50), cv2.FONT_ITALIC, 1, (0, 255, 0), 2)

    cv2.imshow('demo', frame)
    if cv2.waitKey(10) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
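
For reference, each row of results.pandas().xyxy[0] holds the columns xmin, ymin, xmax, ymax, confidence, class, name, which is why box[:4] yields the corner coordinates and box[-1] the class name drawn as the label.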

The demo runs as expected.

II. Converting YOLOv5 to OpenVINO

1. Install ONNX
pip install onnx==1.11.0

2. Modify export.py

Change line 121 of export.py to:

opset_version=10
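
For orientation, the edit lands inside the torch.onnx.export call in export.py. The surrounding arguments differ between YOLOv5 releases, so the following is only a rough sketch of what the modified call looks like, not the literal v5.0 source:

torch.onnx.export(model, img, f, verbose=False,
                  opset_version=10,  # lowered so the OpenVINO 2021.4 Model Optimizer can parse the graph
                  input_names=['images'])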
3. Export to ONNX

Use the trained best.pt file and convert it to an ONNX file.

The conversion command is:

python export.py --weights ../weights/best.pt --img 640 --batch 1
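
Before handing the result to the Model Optimizer, it is worth validating it with the onnx package installed earlier:

import onnx

model = onnx.load('../weights/best.onnx')
onnx.checker.check_model(model)  # raises an exception if the graph is malformed
print('best.onnx is a valid ONNX model')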
4. Convert to OpenVINO

Before converting, install the required packages:

pip install openvino-dev[onnx]==2021.4.0
pip install openvino==2021.4.0

To verify the installation, run mo -h.
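
You can also confirm that the Python runtime imports and list the devices it sees (a minimal check against the 2021.4 Inference Engine API):

from openvino.inference_engine import IECore

ie = IECore()
print(ie.available_devices)  # e.g. ['CPU']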

Next, convert the model. Export it with the following command:

mo --input_model weights/best.onnx  --model_name weights/ir_model   -s 255 --reverse_input_channels --output Conv_294,Conv_245,Conv_196
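
Note that the three names passed to --output (Conv_294, Conv_245, Conv_196) are the final convolutions of the three detection heads in this particular export; they will differ for your model. Besides opening the .onnx file in Netron, one way to locate yours is a short script (an illustrative sketch; the node ordering shown is typical for YOLOv5 exports but not guaranteed):

import onnx

m = onnx.load('weights/best.onnx')
conv_names = [node.name for node in m.graph.node if node.op_type == 'Conv']
print(conv_names[-3:])  # the last three Conv nodes are usually the detection heads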

The command generates three files (ir_model.xml, ir_model.bin, and ir_model.mapping); ir_model.xml is the one used below.

5. Run
python yolov5_demo.py -i cam -m weights/ir_model.xml   -d CPU

Code:

import logging
import os
import sys
from argparse import ArgumentParser, SUPPRESS
from math import exp as exp
from time import time, sleep

import numpy as np
import cv2
from openvino.inference_engine import IENetwork, IECore

logging.basicConfig(format="[ %(levelname)s ] %(message)s", level=logging.INFO, stream=sys.stdout)
log = logging.getLogger()


def build_argparser():
    parser = ArgumentParser(add_help=False)
    args = parser.add_argument_group('Options')
    args.add_argument('-h', '--help', action='help', default=SUPPRESS, help='Show this help message and exit.')
    args.add_argument("-m", "--model", help="Required. Path to an .xml file with a trained model.",
                      required=True, type=str)
    args.add_argument("-i", "--input", help="Required. Path to an image/video file. (Specify 'cam' to work with "
                                            "camera)", required=True, type=str)
    args.add_argument("-l", "--cpu_extension",
                      help="Optional. Required for CPU custom layers. Absolute path to a shared library with "
                           "the kernels implementations.", type=str, default=None)
    args.add_argument("-d", "--device",
                      help="Optional. Specify the target device to infer on; CPU, GPU, FPGA, HDDL or MYRIAD is"
                           " acceptable. The sample will look for a suitable plugin for device specified. "
                           "Default value is CPU", default="CPU", type=str)
    args.add_argument("-t", "--prob_threshold", help="Optional. Probability threshold for detections filtering",
                      default=0.5, type=float)
    args.add_argument("-iout", "--iou_threshold", help="Optional. Intersection over union threshold for overlapping "
                                                       "detections filtering", default=0.4, type=float)
    return parser


class YoloParams:
    # ------------------------------------------- Extracting layer parameters ------------------------------------------
    # Magic numbers are copied from yolo samples
    def __init__(self, side):
        self.num = 3  # if 'num' not in param else int(param['num'])
        self.coords = 4  # if 'coords' not in param else int(param['coords'])
        self.classes = 80  # if 'classes' not in param else int(param['classes'])
        self.side = side
        self.anchors = [10.0, 13.0, 16.0, 30.0, 33.0, 23.0, 30.0, 61.0, 62.0, 45.0, 59.0, 119.0, 116.0, 90.0,
                        156.0, 198.0, 373.0, 326.0]  # if 'anchors' not in param else [float(a) for a in param['anchors'].split(',')]


def letterbox(img, size=(640, 640), color=(114, 114, 114), auto=True, scaleFill=False, scaleup=True):
    # Resize image to a 32-pixel-multiple rectangle https://github.com/ultralytics/yolov3/issues/232
    shape = img.shape[:2]  # current shape [height, width]
    w, h = size

    # Scale ratio (new / old)
    r = min(h / shape[0], w / shape[1])
    if not scaleup:  # only scale down, do not scale up (for better test mAP)
        r = min(r, 1.0)

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = w - new_unpad[0], h - new_unpad[1]  # wh padding
    if auto:  # minimum rectangle
        dw, dh = np.mod(dw, 64), np.mod(dh, 64)  # wh padding
    elif scaleFill:  # stretch
        dw, dh = 0.0, 0.0
        new_unpad = (w, h)
        ratio = w / shape[1], h / shape[0]  # width, height ratios

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        img = cv2.resize(img, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border

    top2, bottom2, left2, right2 = 0, 0, 0, 0
    if img.shape[0] != h:
        top2 = (h - img.shape[0]) // 2
        bottom2 = top2
        img = cv2.copyMakeBorder(img, top2, bottom2, left2, right2, cv2.BORDER_CONSTANT, value=color)  # add border
    elif img.shape[1] != w:
        left2 = (w - img.shape[1]) // 2
        right2 = left2
        img = cv2.copyMakeBorder(img, top2, bottom2, left2, right2, cv2.BORDER_CONSTANT, value=color)  # add border
    return img


def scale_bbox(x, y, height, width, class_id, confidence, im_h, im_w, resized_im_h=640, resized_im_w=640):
    gain = min(resized_im_w / im_w, resized_im_h / im_h)  # gain = old / new
    pad = (resized_im_w - im_w * gain) / 2, (resized_im_h - im_h * gain) / 2  # wh padding
    x = int((x - pad[0]) / gain)
    y = int((y - pad[1]) / gain)
    w = int(width / gain)
    h = int(height / gain)
    xmin = max(0, int(x - w / 2))
    ymin = max(0, int(y - h / 2))
    xmax = min(im_w, int(xmin + w))
    ymax = min(im_h, int(ymin + h))
    # Method item() used here to convert NumPy types to native types for compatibility with functions, which don't
    # support Numpy types (e.g., cv2.rectangle doesn't support int64 in color parameter)
    return dict(xmin=xmin, xmax=xmax, ymin=ymin, ymax=ymax, class_id=class_id.item(), confidence=confidence.item())


def entry_index(side, coord, classes, location, entry):
    side_power_2 = side ** 2
    n = location // side_power_2
    loc = location % side_power_2
    return int(side_power_2 * (n * (coord + classes + 1) + entry) + loc)


def parse_yolo_region(blob, resized_image_shape, original_im_shape, params, threshold):
    # ------------------------------------------ Validating output parameters ------------------------------------------
    out_blob_n, out_blob_c, out_blob_h, out_blob_w = blob.shape
    predictions = 1.0 / (1.0 + np.exp(-blob))  # sigmoid over the whole output blob

    # ------------------------------------------ Extracting layer parameters -------------------------------------------
    orig_im_h, orig_im_w = original_im_shape
    resized_image_h, resized_image_w = resized_image_shape
    objects = list()
    side_square = params.side * params.side

    # ------------------------------------------- Parsing YOLO Region output -------------------------------------------
    bbox_size = int(out_blob_c / params.num)  # 4 + 1 + num_classes
    index = 0
    for row, col, n in np.ndindex(params.side, params.side, params.num):
        bbox = predictions[0, n * bbox_size:(n + 1) * bbox_size, row, col]
        x, y, width, height, object_probability = bbox[:5]
        class_probabilities = bbox[5:]
        if object_probability < threshold:
            continue
        x = (2 * x - 0.5 + col) * (resized_image_w / out_blob_w)
        y = (2 * y - 0.5 + row) * (resized_image_h / out_blob_h)
        # Pick the anchor set for this stride ('and' replaces the original bitwise '&', which only worked by accident)
        if int(resized_image_w / out_blob_w) == 8 and int(resized_image_h / out_blob_h) == 8:  # 80x80
            idx = 0
        elif int(resized_image_w / out_blob_w) == 16 and int(resized_image_h / out_blob_h) == 16:  # 40x40
            idx = 1
        elif int(resized_image_w / out_blob_w) == 32 and int(resized_image_h / out_blob_h) == 32:  # 20x20
            idx = 2
        width = (2 * width) ** 2 * params.anchors[idx * 6 + 2 * n]
        height = (2 * height) ** 2 * params.anchors[idx * 6 + 2 * n + 1]
        class_id = np.argmax(class_probabilities)
        confidence = object_probability
        objects.append(scale_bbox(x=x, y=y, height=height, width=width, class_id=class_id, confidence=confidence,
                                  im_h=orig_im_h, im_w=orig_im_w, resized_im_h=resized_image_h,
                                  resized_im_w=resized_image_w))
        if index > 30:  # cap the number of boxes parsed per layer
            break
        index += 1
    return objects


def intersection_over_union(box_1, box_2):
    width_of_overlap_area = min(box_1['xmax'], box_2['xmax']) - max(box_1['xmin'], box_2['xmin'])
    height_of_overlap_area = min(box_1['ymax'], box_2['ymax']) - max(box_1['ymin'], box_2['ymin'])
    if width_of_overlap_area < 0 or height_of_overlap_area < 0:
        area_of_overlap = 0
    else:
        area_of_overlap = width_of_overlap_area * height_of_overlap_area
    box_1_area = (box_1['ymax'] - box_1['ymin']) * (box_1['xmax'] - box_1['xmin'])
    box_2_area = (box_2['ymax'] - box_2['ymin']) * (box_2['xmax'] - box_2['xmin'])
    area_of_union = box_1_area + box_2_area - area_of_overlap
    if area_of_union == 0:
        return 0
    return area_of_overlap / area_of_union


def main():
    args = build_argparser().parse_args()

    # ------------- 1. Plugin initialization for specified device and load extensions library if specified -------------
    ie = IECore()
    if args.cpu_extension and 'CPU' in args.device:
        ie.add_extension(args.cpu_extension, "CPU")

    # -------------------- 2. Reading the IR generated by the Model Optimizer (.xml and .bin files) --------------------
    model = args.model
    net = ie.read_network(model=model)

    # ---------------------------------------------- 4. Preparing inputs -----------------------------------------------
    input_blob = next(iter(net.input_info))
    # Default batch size is 1
    net.batch_size = 1
    # Read and pre-process input images
    n, c, h, w = net.input_info[input_blob].input_data.shape
    # labels_map = [x.strip() for x in f]
    labels_map = ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus', 'train', 'truck', 'boat', 'traffic light',
                  'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
                  'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
                  'skis', 'snowboard', 'sports ball', 'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard',
                  'tennis racket', 'bottle', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
                  'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
                  'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
                  'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors', 'teddy bear',
                  'hair drier', 'toothbrush']

    input_stream = 0 if args.input == "cam" else args.input
    is_async_mode = True
    cap = cv2.VideoCapture(input_stream)
    number_input_frames = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    number_input_frames = 1 if number_input_frames != -1 and number_input_frames < 0 else number_input_frames
    wait_key_code = 1

    # Number of frames in picture is 1 and this will be read in cycle. Sync mode is default value for this case
    if number_input_frames != 1:
        ret, frame = cap.read()
    else:
        is_async_mode = False
        wait_key_code = 0

    # ----------------------------------------- 5. Loading model to the plugin -----------------------------------------
    exec_net = ie.load_network(network=net, num_requests=2, device_name=args.device)
    cur_request_id = 0
    next_request_id = 1
    render_time = 0
    parsing_time = 0

    # ----------------------------------------------- 6. Doing inference -----------------------------------------------
    initial_w = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    initial_h = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    origin_im_size = (initial_h, initial_w)
    while cap.isOpened():
        # Here is the first asynchronous point: in the Async mode, we capture frame to populate the NEXT infer request
        # in the regular mode, we capture frame to the CURRENT infer request
        if is_async_mode:
            ret, next_frame = cap.read()
        else:
            ret, frame = cap.read()
        if not ret:
            break

        if is_async_mode:
            request_id = next_request_id
            in_frame = letterbox(frame, (w, h))
        else:
            request_id = cur_request_id
            in_frame = letterbox(frame, (w, h))
        in_frame0 = in_frame
        # resize input_frame to network size
        in_frame = in_frame.transpose((2, 0, 1))  # Change data layout from HWC to CHW
        in_frame = in_frame.reshape((n, c, h, w))

        # Start inference
        start_time = time()
        exec_net.start_async(request_id=request_id, inputs={input_blob: in_frame})

        # Collecting object detection results
        objects = list()
        if exec_net.requests[cur_request_id].wait(-1) == 0:
            output = exec_net.requests[cur_request_id].output_blobs
            start_time = time()
            for layer_name, out_blob in output.items():
                layer_params = YoloParams(side=out_blob.buffer.shape[2])
                objects += parse_yolo_region(out_blob.buffer, in_frame.shape[2:],
                                             frame.shape[:-1], layer_params,
                                             args.prob_threshold)
            parsing_time = time() - start_time

        # Filtering overlapping boxes with respect to the --iou_threshold CLI parameter
        objects = sorted(objects, key=lambda obj: obj['confidence'], reverse=True)
        for i in range(len(objects)):
            if objects[i]['confidence'] == 0:
                continue
            for j in range(i + 1, len(objects)):
                if intersection_over_union(objects[i], objects[j]) > args.iou_threshold:
                    objects[j]['confidence'] = 0

        # Drawing objects with respect to the --prob_threshold CLI parameter
        objects = [obj for obj in objects if obj['confidence'] >= args.prob_threshold]
        for obj in objects:
            # Validation bbox of detected object
            if obj['xmax'] > origin_im_size[1] or obj['ymax'] > origin_im_size[0] or obj['xmin'] < 0 or obj['ymin'] < 0:
                continue
            color = (0, 255, 0)
            det_label = labels_map[obj['class_id']] if labels_map and len(labels_map) >= obj['class_id'] else \
                str(obj['class_id'])
            cv2.rectangle(frame, (obj['xmin'], obj['ymin']), (obj['xmax'], obj['ymax']), color, 2)
            cv2.putText(frame,
                        "#" + det_label + ' ' + str(round(obj['confidence'] * 100, 1)) + ' %',
                        (obj['xmin'], obj['ymin'] - 7), cv2.FONT_ITALIC, 1, color, 2)

        # Draw performance stats over frame
        async_mode_message = "Async mode: ON" if is_async_mode else "Async mode: OFF"
        cv2.putText(frame, async_mode_message, (10, int(origin_im_size[0] - 20)), cv2.FONT_ITALIC, 1,
                    (10, 10, 200), 2)
        fps_time = time() - start_time
        if fps_time != 0:
            fps = 1 / fps_time
            cv2.putText(frame, 'fps:' + str(round(fps, 2)), (50, 50), cv2.FONT_ITALIC, 1, (0, 255, 0), 2)
        cv2.imshow("DetectionResults", frame)

        if is_async_mode:
            cur_request_id, next_request_id = next_request_id, cur_request_id
            frame = next_frame

        key = cv2.waitKey(wait_key_code)
        # ESC key
        if key == 27:
            break
        # Tab key
        if key == 9:
            exec_net.requests[cur_request_id].wait()
            is_async_mode = not is_async_mode
            log.info("Switched to {} mode".format("async" if is_async_mode else "sync"))

    cv2.destroyAllWindows()


if __name__ == '__main__':
    sys.exit(main() or 0)
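
For reference, parse_yolo_region above implements the standard YOLOv5 box decoding after sigmoid-activating the whole output blob. A hypothetical standalone version of the per-cell math (names are illustrative; stride is resized_image_w / out_blob_w, i.e. 8, 16, or 32 depending on the head):

def decode_box(tx, ty, tw, th, col, row, stride, anchor_w, anchor_h):
    # tx..th are already sigmoid-activated, as in the script above
    x = (2 * tx - 0.5 + col) * stride   # box centre, in resized-image pixels
    y = (2 * ty - 0.5 + row) * stride
    w = (2 * tw) ** 2 * anchor_w        # width/height rescaled by the matching anchor
    h = (2 * th) ** 2 * anchor_h
    return x, y, w, h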

III. Summary

With OpenVINO acceleration, the frame rate on a CPU (no GPU) rose from roughly 20 FPS to over 50 FPS, which is a decent result. The one caveat is that the model I trained myself did not detect very well.

OpenVINO also performs reasonably well on embedded boards such as the Raspberry Pi.

If any content here infringes your rights, or if you need the complete code, please contact the author promptly.
