
YOLOv5-Lite Real-Time Object Detection (ONNX Runtime deployment + OpenCV camera capture + NCNN deployment), Python and C++ versions

1. Convert the trained .pt model to ONNX format

Use the export.py script that ships with YOLOv5-Lite to export the model to ONNX, with the image size set to 320 and batch size 1.

Afterwards you can run onnxsim to further simplify the exported model.

Reference for onnxsim: "onnxsim-让导出的onnx模型更精简" (alex1801's CSDN blog).

  python export.py --weights weights/v5lite-e.pt --img 320 --batch 1
  python -m onnxsim weights/v5lite-e.onnx weights/yolov5-lite-sim.onnx
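
Before wiring the model into the inference script, it is worth a quick sanity check of the exported file. A minimal sketch (assuming onnx and onnxruntime are installed, and using the output path from the onnxsim command above):

import onnx
import onnxruntime as ort

model_path = "weights/yolov5-lite-sim.onnx"

# structural check of the exported graph
onnx.checker.check_model(onnx.load(model_path))

# the input should be a single 1x3x320x320 image tensor
sess = ort.InferenceSession(model_path)
print(sess.get_inputs()[0].name, sess.get_inputs()[0].shape)
print([o.name for o in sess.get_outputs()])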

2. Real-time inference with the ONNX model via ONNX Runtime (Python version)

This version runs inference at 11+ FPS.

Just change these two arguments to point at your own model and the class list you trained on:

    parser.add_argument('--modelpath', type=str, default="/media/xcy/dcd05f09-46df-4879-bfeb-3bab03a6cc3a/YOLOv5-Lite/weights/v5lite-e.onnx",
                        help="onnx filepath")
    parser.add_argument('--classfile', type=str, default='coco.names',
                        help="classname filepath")

Reference GitHub repository: hpc203/yolov5-lite-onnxruntime (YOLOv5-Lite object detection deployed with ONNXRuntime; includes both C++ and Python versions).
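
The --classfile argument points at a plain text file with one class name per line, in the same order as the labels the model was trained with; the script simply reads the file and strips each line. A small hypothetical example for a custom two-class model:

# create a names file for a hypothetical two-class helmet detector
with open("my_classes.names", "w") as f:
    f.write("helmet\n")
    f.write("head\n")

# then pass it on the command line, e.g. (use whatever filename you saved the script below under):
#   python your_script.py --modelpath weights/my_model.onnx --classfile my_classes.names

The full script is below.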

import cv2
import numpy as np
import argparse
import onnxruntime as ort
import time

class yolov5_lite():
    def __init__(self, model_pb_path, label_path, confThreshold=0.5, nmsThreshold=0.5, objThreshold=0.5):
        so = ort.SessionOptions()
        so.log_severity_level = 3
        self.net = ort.InferenceSession(model_pb_path, so)
        self.classes = list(map(lambda x: x.strip(), open(label_path, 'r').readlines()))
        self.num_classes = len(self.classes)
        anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
        self.nl = len(anchors)
        self.na = len(anchors[0]) // 2
        self.no = self.num_classes + 5
        self.grid = [np.zeros(1)] * self.nl
        self.stride = np.array([8., 16., 32.])
        self.anchor_grid = np.asarray(anchors, dtype=np.float32).reshape(self.nl, -1, 2)
        self.confThreshold = confThreshold
        self.nmsThreshold = nmsThreshold
        self.objThreshold = objThreshold
        self.input_shape = (self.net.get_inputs()[0].shape[2], self.net.get_inputs()[0].shape[3])

    def resize_image(self, srcimg, keep_ratio=True):
        top, left, newh, neww = 0, 0, self.input_shape[0], self.input_shape[1]
        if keep_ratio and srcimg.shape[0] != srcimg.shape[1]:
            hw_scale = srcimg.shape[0] / srcimg.shape[1]
            if hw_scale > 1:
                newh, neww = self.input_shape[0], int(self.input_shape[1] / hw_scale)
                img = cv2.resize(srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
                left = int((self.input_shape[1] - neww) * 0.5)
                img = cv2.copyMakeBorder(img, 0, 0, left, self.input_shape[1] - neww - left, cv2.BORDER_CONSTANT,
                                         value=0)  # add border
            else:
                newh, neww = int(self.input_shape[0] * hw_scale), self.input_shape[1]
                img = cv2.resize(srcimg, (neww, newh), interpolation=cv2.INTER_AREA)
                top = int((self.input_shape[0] - newh) * 0.5)
                img = cv2.copyMakeBorder(img, top, self.input_shape[0] - newh - top, 0, 0, cv2.BORDER_CONSTANT, value=0)
        else:
            img = cv2.resize(srcimg, self.input_shape, interpolation=cv2.INTER_AREA)
        return img, newh, neww, top, left

    def _make_grid(self, nx=20, ny=20):
        xv, yv = np.meshgrid(np.arange(ny), np.arange(nx))
        return np.stack((xv, yv), 2).reshape((-1, 2)).astype(np.float32)

    def postprocess(self, frame, outs, pad_hw):
        newh, neww, padh, padw = pad_hw
        frameHeight = frame.shape[0]
        frameWidth = frame.shape[1]
        ratioh, ratiow = frameHeight / newh, frameWidth / neww
        # Scan through all the bounding boxes output from the network and keep only the
        # ones with high confidence scores. Assign the box's class label as the class with the highest score.
        classIds = []
        confidences = []
        boxes = []
        for detection in outs:
            scores = detection[5:]
            classId = np.argmax(scores)
            confidence = scores[classId]
            if confidence > self.confThreshold and detection[4] > self.objThreshold:
                center_x = int((detection[0] - padw) * ratiow)
                center_y = int((detection[1] - padh) * ratioh)
                width = int(detection[2] * ratiow)
                height = int(detection[3] * ratioh)
                left = int(center_x - width / 2)
                top = int(center_y - height / 2)
                classIds.append(classId)
                confidences.append(float(confidence))
                boxes.append([left, top, width, height])
        # Perform non maximum suppression to eliminate redundant overlapping boxes with
        # lower confidences.
        indices = cv2.dnn.NMSBoxes(boxes, confidences, self.confThreshold, self.nmsThreshold)
        for i in indices:
            i = i[0] if isinstance(i, (tuple, list)) else i
            box = boxes[i]
            left = box[0]
            top = box[1]
            width = box[2]
            height = box[3]
            frame = self.drawPred(frame, classIds[i], confidences[i], left, top, left + width, top + height)
        return frame

    def drawPred(self, frame, classId, conf, left, top, right, bottom):
        # Draw a bounding box.
        cv2.rectangle(frame, (left, top), (right, bottom), (0, 0, 255), thickness=4)
        label = '%.2f' % conf
        label = '%s:%s' % (self.classes[classId], label)
        # Display the label at the top of the bounding box
        labelSize, baseLine = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.5, 1)
        top = max(top, labelSize[1])
        # cv.rectangle(frame, (left, top - round(1.5 * labelSize[1])), (left + round(1.5 * labelSize[0]), top + baseLine), (255,255,255), cv.FILLED)
        cv2.putText(frame, label, (left, top - 10), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 255, 0), thickness=2)
        return frame

    def detect(self, srcimg):
        img, newh, neww, top, left = self.resize_image(srcimg)
        img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img = img.astype(np.float32) / 255.0
        blob = np.expand_dims(np.transpose(img, (2, 0, 1)), axis=0)
        outs = self.net.run(None, {self.net.get_inputs()[0].name: blob})[0].squeeze(axis=0)
        row_ind = 0
        for i in range(self.nl):
            h, w = int(self.input_shape[0] / self.stride[i]), int(self.input_shape[1] / self.stride[i])
            length = int(self.na * h * w)
            if self.grid[i].shape[2:4] != (h, w):
                self.grid[i] = self._make_grid(w, h)
            outs[row_ind:row_ind + length, 0:2] = (outs[row_ind:row_ind + length, 0:2] * 2. - 0.5 + np.tile(
                self.grid[i], (self.na, 1))) * int(self.stride[i])
            outs[row_ind:row_ind + length, 2:4] = (outs[row_ind:row_ind + length, 2:4] * 2) ** 2 * np.repeat(
                self.anchor_grid[i], h * w, axis=0)
            row_ind += length
        srcimg = self.postprocess(srcimg, outs, (newh, neww, top, left))
        # cv2.imwrite('result.jpg', srcimg)
        return srcimg
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--imgpath', type=str, default="",
                        help="image path")
    parser.add_argument('--modelpath', type=str, default="/media/xcy/dcd05f09-46df-4879-bfeb-3bab03a6cc3a/YOLOv5-Lite/weights/v5lite-e.onnx",
                        help="onnx filepath")
    parser.add_argument('--classfile', type=str, default='coco.names',
                        help="classname filepath")
    parser.add_argument('--confThreshold', default=0.5, type=float, help='class confidence')
    parser.add_argument('--nmsThreshold', default=0.6, type=float, help='nms iou thresh')
    args = parser.parse_args()
    # srcimg = cv2.imread(args.imgpath)
    # print(args.imgpath, srcimg)

    net = yolov5_lite(args.modelpath, args.classfile, confThreshold=args.confThreshold, nmsThreshold=args.nmsThreshold)
    print(net)

    counter = 0
    start_time = time.time()
    # 1. open the default camera
    capture = cv2.VideoCapture(0)
    # 2. read the first frame
    ret, frame = capture.read()
    fps = capture.get(cv2.CAP_PROP_FPS)  # nominal frame rate reported by the camera
    while ret:
        counter += 1  # frame counter
        if (time.time() - start_time) != 0:  # overlay the measured FPS on the frame
            cv2.putText(frame, "FPS {0}".format(float('%.1f' % (counter / (time.time() - start_time)))), (30, 50),
                        cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255),
                        2)
            # 3. ret is True if a frame was read successfully
            cv2.imshow("video", frame)
            ret, frame = capture.read()
            if not ret:  # guard: stop when the camera no longer delivers frames
                break
            print("FPS: ", counter / (time.time() - start_time))
            counter = 0
            start_time = time.time()
            srcimg = net.detect(frame)
            # winName = 'Deep learning object detection in onnxruntime'
            # cv2.namedWindow(winName, cv2.WINDOW_NORMAL)
            # cv2.imshow(winName, srcimg)
            # 4. press 'q' to stop
            if cv2.waitKey(20) & 0xff == ord('q'):
                break
    # 5. release the camera
    capture.release()
    # 6. close all windows
    cv2.destroyAllWindows()
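
To make the decode loop in detect() above concrete: with a 320x320 input, the three strides 8/16/32 give 40x40, 20x20 and 10x10 grids, and with 3 anchors per scale the tensor that postprocess() receives has 6300 rows of (cx, cy, w, h, objectness, class scores). A quick check of that bookkeeping, using the same constants as the class above (85 columns applies to the 80-class COCO model; a custom model changes only that number):

import numpy as np

input_shape = (320, 320)
strides = np.array([8., 16., 32.])
na = 3                       # anchors per scale
num_classes = 80             # COCO

rows = 0
for s in strides:
    h, w = int(input_shape[0] / s), int(input_shape[1] / s)
    rows += na * h * w       # 4800 + 1200 + 300

print(rows, num_classes + 5)  # 6300 85 -> outs has shape (6300, 85)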

3. Real-time inference with NCNN + OpenCV (C++ version)

This version reaches 33+ FPS on a laptop.

The code is now cleaned up and shown below; with ncnn configured in VS2019 it runs as-is.

For setting up NCNN on Linux, see my other blog post: Ubuntu20.04配置NCNN推理框架(转换yolov5 onnx格式到ncnn格式) (CSDN blog).

The Windows setup is fairly simple; a quick search will turn up plenty of guides.
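
One note on the program below: the three ex.extract() calls use the output blob names of my converted model ("output", "1111", "2222" for the v6.2-style export); a different conversion can produce different names. If extraction fails, you can list the output blobs of your own .param file. A rough Python sketch, assuming the standard ncnn text param layout (a magic line, a "layer_count blob_count" line, then one layer per line as "type name num_inputs num_outputs inputs... outputs... params..."):

def list_output_blobs(param_path):
    # output blobs are those produced by some layer but never consumed by another
    produced, consumed = [], set()
    with open(param_path) as f:
        lines = [ln.split() for ln in f if ln.strip()]
    for tokens in lines[2:]:                  # skip the magic and count lines
        num_in, num_out = int(tokens[2]), int(tokens[3])
        consumed.update(tokens[4:4 + num_in])
        produced.extend(tokens[4 + num_in:4 + num_in + num_out])
    return [b for b in produced if b not in consumed]

print(list_output_blobs("../model/v5lite-e.param"))  # should print something like ['output', '1111', '2222']

The full C++ program follows.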

#include "layer.h"
#include "net.h"

#if defined(USE_NCNN_SIMPLEOCV)
#include "simpleocv.h"
#else
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#endif
#include <float.h>
#include <stdio.h>
#include <vector>
#include <iostream>
#include <chrono>

//#define YOLOV5_V60 1 //YOLOv5 v6.0
#define YOLOV5_V62 1 //YOLOv5 v6.2 export onnx model method https://github.com/shaoshengsong/yolov5_62_export_ncnn

#if YOLOV5_V60 || YOLOV5_V62
#define MAX_STRIDE 64
#else
#define MAX_STRIDE 32

class YoloV5Focus : public ncnn::Layer
{
public:
    YoloV5Focus()
    {
        one_blob_only = true;
    }

    virtual int forward(const ncnn::Mat& bottom_blob, ncnn::Mat& top_blob, const ncnn::Option& opt) const
    {
        int w = bottom_blob.w;
        int h = bottom_blob.h;
        int channels = bottom_blob.c;

        int outw = w / 2;
        int outh = h / 2;
        int outc = channels * 4;

        top_blob.create(outw, outh, outc, 4u, 1, opt.blob_allocator);
        if (top_blob.empty())
            return -100;

        #pragma omp parallel for num_threads(opt.num_threads)
        for (int p = 0; p < outc; p++)
        {
            const float* ptr = bottom_blob.channel(p % channels).row((p / channels) % 2) + ((p / channels) / 2);
            float* outptr = top_blob.channel(p);

            for (int i = 0; i < outh; i++)
            {
                for (int j = 0; j < outw; j++)
                {
                    *outptr = *ptr;

                    outptr += 1;
                    ptr += 2;
                }

                ptr += w;
            }
        }

        return 0;
    }
};

DEFINE_LAYER_CREATOR(YoloV5Focus)
#endif //YOLOV5_V60 YOLOV5_V62
struct Object
{
    cv::Rect_<float> rect;
    int label;
    float prob;
};

static inline float intersection_area(const Object& a, const Object& b)
{
    cv::Rect_<float> inter = a.rect & b.rect;
    return inter.area();
}

static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
{
    int i = left;   // left index
    int j = right;  // right index
    float p = faceobjects[(left + right) / 2].prob; // pivot: probability of the middle element

    while (i <= j)
    {
        while (faceobjects[i].prob > p) // skip elements on the left already greater than the pivot
            i++;
        while (faceobjects[j].prob < p) // skip elements on the right already smaller than the pivot
            j--;
        if (i <= j)
        {
            // swap the misplaced pair and move both indices towards the middle
            std::swap(faceobjects[i], faceobjects[j]);

            i++;
            j--;
        }
    }

    #pragma omp parallel sections
    {
        #pragma omp section
        {
            if (left < j) qsort_descent_inplace(faceobjects, left, j);
        }
        #pragma omp section
        {
            if (i < right) qsort_descent_inplace(faceobjects, i, right);
        }
    }
}

static void qsort_descent_inplace(std::vector<Object>& faceobjects)
{
    if (faceobjects.empty())
        return;

    qsort_descent_inplace(faceobjects, 0, faceobjects.size() - 1);
}

static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold, bool agnostic = false)
{
    picked.clear();

    const int n = faceobjects.size();

    std::vector<float> areas(n);
    for (int i = 0; i < n; i++)
    {
        areas[i] = faceobjects[i].rect.area();
    }

    for (int i = 0; i < n; i++)
    {
        const Object& a = faceobjects[i];

        int keep = 1;
        for (int j = 0; j < (int)picked.size(); j++)
        {
            const Object& b = faceobjects[picked[j]];

            if (!agnostic && a.label != b.label)
                continue;

            // intersection over union
            float inter_area = intersection_area(a, b);
            float union_area = areas[i] + areas[picked[j]] - inter_area;
            // float IoU = inter_area / union_area
            if (inter_area / union_area > nms_threshold)
                keep = 0;
        }

        if (keep)
            picked.push_back(i);
    }
}

static inline float sigmoid(float x)
{
    return static_cast<float>(1.f / (1.f + exp(-x)));
}
static void generate_proposals(const ncnn::Mat& anchors, int stride, const ncnn::Mat& in_pad, const ncnn::Mat& feat_blob, float prob_threshold, std::vector<Object>& objects)
{
    const int num_grid = feat_blob.h;

    int num_grid_x;
    int num_grid_y;
    if (in_pad.w > in_pad.h)
    {
        num_grid_x = in_pad.w / stride;
        num_grid_y = num_grid / num_grid_x;
    }
    else
    {
        num_grid_y = in_pad.h / stride;
        num_grid_x = num_grid / num_grid_y;
    }

    const int num_class = feat_blob.w - 5; // feature width is e.g. 85: 80 classes + xywh + objectness
    const int num_anchors = anchors.w / 2;

    for (int q = 0; q < num_anchors; q++)
    {
        const float anchor_w = anchors[q * 2];
        const float anchor_h = anchors[q * 2 + 1];

        const ncnn::Mat feat = feat_blob.channel(q);

        for (int i = 0; i < num_grid_y; i++)
        {
            for (int j = 0; j < num_grid_x; j++)
            {
                const float* featptr = feat.row(i * num_grid_x + j);
                float box_confidence = sigmoid(featptr[4]);
                if (box_confidence >= prob_threshold)
                {
                    // find class index with max class score
                    int class_index = 0;
                    float class_score = -FLT_MAX;
                    for (int k = 0; k < num_class; k++)
                    {
                        float score = featptr[5 + k];
                        if (score > class_score)
                        {
                            class_index = k;
                            class_score = score;
                        }
                    }
                    float confidence = box_confidence * sigmoid(class_score);
                    if (confidence >= prob_threshold)
                    {
                        // yolov5/models/yolo.py Detect forward
                        // y = x[i].sigmoid()
                        // y[..., 0:2] = (y[..., 0:2] * 2. - 0.5 + self.grid[i].to(x[i].device)) * self.stride[i]  # xy
                        // y[..., 2:4] = (y[..., 2:4] * 2) ** 2 * self.anchor_grid[i]  # wh

                        float dx = sigmoid(featptr[0]);
                        float dy = sigmoid(featptr[1]);
                        float dw = sigmoid(featptr[2]);
                        float dh = sigmoid(featptr[3]);

                        float pb_cx = (dx * 2.f - 0.5f + j) * stride;
                        float pb_cy = (dy * 2.f - 0.5f + i) * stride;

                        float pb_w = pow(dw * 2.f, 2) * anchor_w;
                        float pb_h = pow(dh * 2.f, 2) * anchor_h;

                        float x0 = pb_cx - pb_w * 0.5f;
                        float y0 = pb_cy - pb_h * 0.5f;
                        float x1 = pb_cx + pb_w * 0.5f;
                        float y1 = pb_cy + pb_h * 0.5f;

                        Object obj;
                        obj.rect.x = x0;
                        obj.rect.y = y0;
                        obj.rect.width = x1 - x0;
                        obj.rect.height = y1 - y0;
                        obj.label = class_index;
                        obj.prob = confidence;

                        objects.push_back(obj);
                    }
                }
            }
        }
    }
}
static int detect_yolov5(const cv::Mat& bgr, std::vector<Object>& objects, ncnn::Extractor ex)
{
    const int target_size = 320;
    const float prob_threshold = 0.25f;
    const float nms_threshold = 0.45f;

    int img_w = bgr.cols;
    int img_h = bgr.rows;

    // letterbox pad to multiple of MAX_STRIDE
    int w = img_w;
    int h = img_h;
    float scale = 1.f;
    if (w > h)
    {
        scale = (float)target_size / w;
        w = target_size;
        h = h * scale;
    }
    else
    {
        scale = (float)target_size / h;
        h = target_size;
        w = w * scale;
    }

    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, img_w, img_h, w, h);

    // pad to target_size rectangle
    // yolov5/utils/datasets.py letterbox
    int wpad = (w + MAX_STRIDE - 1) / MAX_STRIDE * MAX_STRIDE - w;
    int hpad = (h + MAX_STRIDE - 1) / MAX_STRIDE * MAX_STRIDE - h;
    ncnn::Mat in_pad;
    ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 114.f);

    const float norm_vals[3] = { 1 / 255.f, 1 / 255.f, 1 / 255.f };
    in_pad.substract_mean_normalize(0, norm_vals);

    /*ncnn::Extractor ex = yolov5.create_extractor();*/

    ex.input("images", in_pad);

    std::vector<Object> proposals;

    // anchor setting from yolov5/models/yolov5s.yaml

    // stride 8
    {
        ncnn::Mat out;
        ex.extract("output", out);

        ncnn::Mat anchors(6);
        anchors[0] = 10.f;
        anchors[1] = 13.f;
        anchors[2] = 16.f;
        anchors[3] = 30.f;
        anchors[4] = 33.f;
        anchors[5] = 23.f;

        std::vector<Object> objects8;
        generate_proposals(anchors, 8, in_pad, out, prob_threshold, objects8);

        proposals.insert(proposals.end(), objects8.begin(), objects8.end());
    }

    // stride 16
    {
        ncnn::Mat out;
#if YOLOV5_V62
        ex.extract("1111", out);
#elif YOLOV5_V60
        ex.extract("376", out);
#else
        ex.extract("781", out);
#endif
        ncnn::Mat anchors(6);
        anchors[0] = 30.f;
        anchors[1] = 61.f;
        anchors[2] = 62.f;
        anchors[3] = 45.f;
        anchors[4] = 59.f;
        anchors[5] = 119.f;

        std::vector<Object> objects16;
        generate_proposals(anchors, 16, in_pad, out, prob_threshold, objects16);

        proposals.insert(proposals.end(), objects16.begin(), objects16.end());
    }

    // stride 32
    {
        ncnn::Mat out;
#if YOLOV5_V62
        ex.extract("2222", out);
#elif YOLOV5_V60
        ex.extract("401", out);
#else
        ex.extract("801", out);
#endif
        ncnn::Mat anchors(6);
        anchors[0] = 116.f;
        anchors[1] = 90.f;
        anchors[2] = 156.f;
        anchors[3] = 198.f;
        anchors[4] = 373.f;
        anchors[5] = 326.f;

        std::vector<Object> objects32;
        generate_proposals(anchors, 32, in_pad, out, prob_threshold, objects32);

        proposals.insert(proposals.end(), objects32.begin(), objects32.end());
    }

    // sort all proposals by score from highest to lowest
    qsort_descent_inplace(proposals);

    // apply nms with nms_threshold
    std::vector<int> picked;
    nms_sorted_bboxes(proposals, picked, nms_threshold);

    int count = picked.size();

    objects.resize(count);
    for (int i = 0; i < count; i++)
    {
        objects[i] = proposals[picked[i]];

        // adjust offset to original unpadded
        float x0 = (objects[i].rect.x - (wpad / 2)) / scale;
        float y0 = (objects[i].rect.y - (hpad / 2)) / scale;
        float x1 = (objects[i].rect.x + objects[i].rect.width - (wpad / 2)) / scale;
        float y1 = (objects[i].rect.y + objects[i].rect.height - (hpad / 2)) / scale;

        // clip
        x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
        y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
        x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
        y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

        objects[i].rect.x = x0;
        objects[i].rect.y = y0;
        objects[i].rect.width = x1 - x0;
        objects[i].rect.height = y1 - y0;
    }

    return 0;
}
static void draw_objects(const cv::Mat& bgr, const std::vector<Object>& objects, double fps)
{
    static const char* class_names[] = {
        "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
        "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
        "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
        "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
        "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
        "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
        "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
        "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
        "hair drier", "toothbrush"
    };

    /*cv::Mat image = bgr.clone();*/
    cv::Mat image = bgr;

    for (size_t i = 0; i < objects.size(); i++)
    {
        const Object& obj = objects[i];

        fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
                obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);

        cv::rectangle(image, obj.rect, cv::Scalar(255, 0, 0));

        char text[256];
        sprintf_s(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);

        int baseLine = 0;
        cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

        int x = obj.rect.x;
        int y = obj.rect.y - label_size.height - baseLine;
        if (y < 0)
            y = 0;
        if (x + label_size.width > image.cols)
            x = image.cols - label_size.width;

        cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
                      cv::Scalar(255, 255, 255), -1);

        cv::putText(image, text, cv::Point(x, y + label_size.height),
                    cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
    }

    // draw the FPS overlay once per frame (not once per detected object)
    cv::putText(image, "FPS: " + std::to_string(fps), cv::Point(30, 50),
                cv::FONT_HERSHEY_SIMPLEX, 1.0, cv::Scalar(0, 0, 255), 2);

    //cv::imshow("image", image);
    //cv::waitKey(1);
}
int main()
{
    ncnn::Net yolov5;

    yolov5.opt.use_vulkan_compute = true;
    /*yolov5.opt.use_bf16_storage = true;*/

    yolov5.load_param("../model/v5lite-e.param");
    yolov5.load_model("../model/v5lite-e.bin");

    ncnn::Extractor ex = yolov5.create_extractor();
    /*ex.set_num_threads(4);*/

    cv::VideoCapture capture(0); // open the default camera

    int frameCount = 0;
    double totalTime = 0.0;
    double fps = 0.0;

    cv::Mat frame;
    std::vector<Object> objects;

    while (true)
    {
        auto start = std::chrono::high_resolution_clock::now();

        capture >> frame; // grab the next frame from the camera
        if (frame.empty()) // guard: stop when the camera no longer delivers frames
            break;

        // run YOLOv5-Lite detection
        detect_yolov5(frame, objects, ex);
        draw_objects(frame, objects, fps);

        // show the result
        cv::imshow("YOLOv5-Lite detection", frame);

        // update frame statistics
        frameCount++;
        auto end = std::chrono::high_resolution_clock::now();
        double timeSec = std::chrono::duration<double>(end - start).count();
        totalTime += timeSec;

        // compute and print the average frame rate
        fps = frameCount / totalTime;
        std::cout << "FPS: " << fps << std::endl;

        int key = cv::waitKey(1);
        if (key == 27) // press ESC to exit
            break;
    }

    capture.release();       // release the camera
    cv::destroyAllWindows(); // close all windows

    return 0;
}
