当前位置:   article > 正文

yolov5 OpenCV DNN c++ 部署_yolov5使用opencv进行c++推理部署

yolov5使用opencv进行c++推理部署

一、转onnx格式

yolov5-6.2版本可以用export.py导出onnx格式的模型:

python export.py --weights yolov5n.pt --include onnx

yolov5n.pt我是直接在这里下载的:https://github.com/ultralytics/yolov5/releases/tag/v6.2

二、部署代码

yolo.h

// https://github.com/UNeedCryDear/yolov5-opencv-dnn-cpp
#pragma once
#include<iostream>
#include<opencv2/opencv.hpp>
#define YOLO_P6 false // whether a P6 (1280-input) model is used
// A single detection produced by Yolov5::Detect.
struct Output {
    int id; // class index into Yolov5::_className
    float confidence; // combined confidence (objectness * class score)
    cv::Rect box; // bounding box in the original image's coordinate system
};
// Thin wrapper around cv::dnn for loading and running a YOLOv5 ONNX model.
class Yolov5 {
public:
    Yolov5() {
    }
    ~Yolov5() {}
    // Loads the ONNX model at netPath into net and selects the CUDA or CPU
    // backend. Returns false if the model file cannot be parsed.
    bool readModel(cv::dnn::Net& net, std::string& netPath, bool isCuda);
    // Runs inference on SrcImg and appends the surviving detections to output.
    // Returns true when at least one detection passes thresholding and NMS.
    bool Detect(cv::Mat& SrcImg, cv::dnn::Net& net, std::vector<Output>& output);
    // Draws each detection's box and "<class>:<confidence>" label onto img,
    // using color[class id] for both.
    void drawPred(cv::Mat& img, std::vector<Output> result, std::vector<cv::Scalar> color);
private:
    // Aspect-ratio-preserving resize plus border padding ("letterbox").
    void LetterBox(const cv::Mat& image, cv::Mat& outImage,
    cv::Vec4d& params, //[ratio_x,ratio_y,dw,dh] — needed to map boxes back
    const cv::Size& newShape = cv::Size(640, 640),
    bool autoShape = false,
    bool scaleFill = false,
    bool scaleUp = true,
    int stride = 32,
    const cv::Scalar& color = cv::Scalar(114, 114, 114));
    const int _netWidth = 640; // ONNX model input width
    const int _netHeight = 640; // ONNX model input height
    float _classThreshold = 0.25; // minimum score for a candidate box
    float _nmsThreshold = 0.45; // IoU threshold used by NMSBoxes
    // The 80 COCO class names, indexed by Output::id.
    std::vector<std::string> _className = { "person", "bicycle", "car", "motorcycle", "airplane", "bus", "train", "truck", "boat", "traffic light",
    "fire hydrant", "stop sign", "parking meter", "bench", "bird", "cat", "dog", "horse", "sheep", "cow",
    "elephant", "bear", "zebra", "giraffe", "backpack", "umbrella", "handbag", "tie", "suitcase", "frisbee",
    "skis", "snowboard", "sports ball", "kite", "baseball bat", "baseball glove", "skateboard", "surfboard",
    "tennis racket", "bottle", "wine glass", "cup", "fork", "knife", "spoon", "bowl", "banana", "apple",
    "sandwich", "orange", "broccoli", "carrot", "hot dog", "pizza", "donut", "cake", "chair", "couch",
    "potted plant", "bed", "dining table", "toilet", "tv", "laptop", "mouse", "remote", "keyboard", "cell phone",
    "microwave", "oven", "toaster", "sink", "refrigerator", "book", "clock", "vase", "scissors", "teddy bear",
    "hair drier", "toothbrush" };
};

yolo.cpp

  1. #include "yolo.h"
  2. using namespace std;
  3. using namespace cv;
  4. using namespace cv::dnn;
  5. void Yolov5::LetterBox(const cv::Mat& image, cv::Mat& outImage, cv::Vec4d& params, const cv::Size& newShape,
  6. bool autoShape, bool scaleFill, bool scaleUp, int stride, const cv::Scalar& color)
  7. {
  8. Size shape = image.size();
  9. float r = std::min((float)newShape.width / (float)shape.width,
  10. (float)newShape.height / (float)shape.height); // 选出较小的缩放比,否则会超过
  11. float ratio[2]{r, r};
  12. int new_up_pad[2] ={(int)round((float)shape.width * r),
  13. (int)round((float)shape.height * r)}; // 缩放后与目标长宽可能还差一点
  14. auto dw = (float)(newShape.width - new_up_pad[0]);// 算出与目标长宽差多少
  15. auto dh = (float)(newShape.height - new_up_pad[1]);
  16. dw /= 2.0f;
  17. dh /= 2.0f;
  18. if (shape.width != new_up_pad[0] && shape.height != new_up_pad[1])//等比例缩放
  19. {
  20. resize(image, outImage, Size(new_up_pad[0], new_up_pad[1]));
  21. }
  22. else {
  23. outImage = image.clone();
  24. }
  25. int top = int(round(dh - 0.1f)); // 四周用0来填充
  26. int bottom = int(round(dh + 0.1f));
  27. int left = int(round(dw - 0.1f));
  28. int right = int(round(dw + 0.1f));
  29. params[0] = ratio[0];
  30. params[1] = ratio[1];
  31. params[2] = left;
  32. params[3] = top;
  33. copyMakeBorder(outImage, outImage, top, bottom, left, right,BORDER_CONSTANT,color);
  34. }
  35. bool Yolov5::readModel(Net &net, string &netPath, bool isCuda)
  36. {
  37. try {
  38. // net = readNet(netPath);
  39. net = readNetFromONNX(netPath);
  40. } catch (const std::exception&) {
  41. return false;
  42. }
  43. if (isCuda)
  44. {
  45. net.setPreferableBackend(DNN_BACKEND_CUDA);
  46. net.setPreferableTarget(DNN_TARGET_CUDA);
  47. }
  48. else
  49. {
  50. net.setPreferableBackend(DNN_BACKEND_OPENCV);
  51. net.setPreferableTarget(DNN_TARGET_CPU);
  52. }
  53. return true;
  54. }
  55. bool Yolov5::Detect(Mat &SrcImg, Net &net, vector<Output> &output)
  56. {
  57. Mat blob;
  58. int col = SrcImg.cols;
  59. int row = SrcImg.rows;
  60. int maxLen = MAX(col, row);
  61. Mat netinputImg = SrcImg.clone();
  62. Vec4d params;
  63. LetterBox(SrcImg, netinputImg, params, Size(_netWidth, _netHeight));
  64. blobFromImage(netinputImg, blob, 1/255.0, Size(_netWidth, _netHeight),Scalar(0,0,0), true, false);
  65. //如果在其他设置没有问题的情况下但是结果偏差很大,可以尝试下用下面两句语句
  66. //blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(104, 117, 123), true, false);
  67. //blobFromImage(netInputImg, blob, 1 / 255.0, cv::Size(_netWidth, _netHeight), cv::Scalar(114, 114,114), true, false);
  68. net.setInput(blob);
  69. vector<Mat> netOutputImg;
  70. net.forward(netOutputImg, net.getUnconnectedOutLayersNames());
  71. vector<int> classIds;
  72. vector<float> confidences;
  73. vector<Rect> boxes;
  74. float ratio_h = (float)netinputImg.rows / _netWidth; // 此时为1
  75. float ratio_w = (float)netinputImg.cols / _netWidth;
  76. int net_width = _className.size() + 5;
  77. int net_out_width = netOutputImg[0].size[2];
  78. CV_Assert(net_out_width == net_width);
  79. float* pdata = (float*)netOutputImg[0].data;
  80. int net_height = netOutputImg[0].size[1];
  81. for (int r = 0; r < net_height; ++r) // 下一个框)
  82. {
  83. float box_score = pdata[4];
  84. if (box_score >= _classThreshold)
  85. {
  86. Mat scores(1, _className.size(), CV_32FC1, pdata+5);
  87. Point classIdPoint;
  88. double max_class_score;
  89. minMaxLoc(scores, 0, &max_class_score,0, &classIdPoint);
  90. max_class_score = max_class_score * box_score;
  91. if(max_class_score > _classThreshold)
  92. {
  93. float x = (pdata[0] - params[2]) / params[0]; // 缩放、padding后,-》原图
  94. float y = (pdata[1] - params[3]) / params[1]; // params: out // in
  95. float w = pdata[2] / params[0];
  96. float h = pdata[3] / params[1];
  97. int left = MAX(round(x - 0.5 * w), 0);
  98. int top = MAX(round(y - 0.5*h), 0);
  99. classIds.push_back(classIdPoint.x);
  100. confidences.push_back(max_class_score);
  101. boxes.push_back(Rect(left, top, round( w * ratio_w), round(h * ratio_h)));// ??
  102. }
  103. }
  104. pdata += net_width;
  105. }
  106. // 执行非最大抑制以消除具有较低置信度的冗余重叠框(NMS)
  107. vector<int> nms_result;
  108. NMSBoxes(boxes,confidences,_classThreshold,_nmsThreshold,nms_result);
  109. for(size_t i =0; i<nms_result.size(); i++)
  110. {
  111. int idx = nms_result[i];
  112. Output result;
  113. result.id = classIds[idx];
  114. result.confidence = confidences[idx];
  115. result.box = boxes[idx];
  116. output.push_back(result);
  117. }
  118. if (output.size())
  119. return true;
  120. else
  121. return false;
  122. }
  123. void Yolov5::drawPred(Mat &img, vector<Output> result, vector<Scalar> color)
  124. {
  125. for (size_t i=0; i<result.size(); i++)
  126. {
  127. int left, top;
  128. left = result[i].box.x;
  129. top = result[i].box.y;
  130. int color_num = i;
  131. rectangle(img, result[i].box, color[result[i].id],2,8);
  132. string label = _className[result[i].id] + ":" + to_string(result[i].confidence);
  133. int baseLine;
  134. Size labeSize = getTextSize(label, FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);
  135. top = top - labeSize.height;
  136. putText(img, label, Point(left, top), FONT_HERSHEY_SIMPLEX, 1, color[result[i].id], 2);
  137. }
  138. // imshow("1", img);
  139. // waitKey();
  140. }

main.cpp

  1. #include "yolo.h"
  2. #include<math.h>
  3. #include<iostream>
  4. using namespace std;
  5. using namespace cv;
  6. using namespace cv::dnn;
  7. #include "yolo.h"
  8. #include <iostream>
  9. //#include<opencv2//opencv.hpp>
  10. #include<math.h>
  11. using namespace std;
  12. using namespace cv;
  13. using namespace dnn;
  14. int main()
  15. {
  16. string img_path = "/home/jason/work/01-img/dog2.png";
  17. string model_path = "/home/jason/PycharmProjects/pytorch_learn/yolov5-6.2/yolov5n.onnx";
  18. //int num_devices = cv::cuda::getCudaEnabledDeviceCount();
  19. //if (num_devices <= 0) {
  20. //cerr << "There is no cuda." << endl;
  21. //return -1;
  22. //}
  23. //else {
  24. //cout << num_devices << endl;
  25. //}
  26. Yolov5 test;
  27. Net net;
  28. if (test.readModel(net, model_path, false)) {
  29. cout << "read net ok!" << endl;
  30. }
  31. else {
  32. return -1;
  33. }
  34. //生成随机颜色
  35. vector<Scalar> color;
  36. srand(time(0));
  37. for (int i = 0; i < 80; i++) {
  38. int b = rand() % 256;
  39. int g = rand() % 256;
  40. int r = rand() % 256;
  41. color.push_back(Scalar(b, g, r));
  42. }
  43. vector<Output> result;
  44. // Mat img = imread(img_path);
  45. VideoCapture capture(2);
  46. Mat img;
  47. while (1)
  48. {
  49. capture >>img;
  50. test.Detect(img, net, result);
  51. // if (test.Detect(img, net, result))
  52. {
  53. test.drawPred(img, result, color);
  54. result.erase(result.begin(), result.end());
  55. vector<double> layersTimes;
  56. double freq = getTickFrequency() / 1000; // https://blog.csdn.net/chaipp0607/article/details/71056580
  57. double t = net.getPerfProfile(layersTimes) / freq;
  58. string label = format("%s Inference time : %.2f ms", "yolov5n", t);
  59. putText(img, label, Point(0, 30), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0,0,255),2);
  60. // }
  61. // else
  62. // {
  63. // cout << "Detect Failed!"<<endl;
  64. // }
  65. imshow("1", img);
  66. if (waitKey(1) == 27) break;
  67. }
  68. // system("pause");
  69. return 0;
  70. }

三、总结:

我用摄像头直接读取处理,发现yolov5n 运行起来有点卡,AMD R5300H CPU 的耗时是150ms左右,而 同样设备Pytorch Python 推理 只有几十ms耗时!!

欢迎留言、欢迎交流!!

参考:

GitHub - UNeedCryDear/yolov5-opencv-dnn-cpp: 使用opencv模块部署yolov5-6.0版本

2021.11.01 c++下 opencv部署yolov5-6.0版本 (四)_怎么查看yolov5版本_爱晚乏客游的博客-CSDN博客

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/小蓝xlanll/article/detail/494509
推荐阅读
相关标签
  

闽ICP备14008679号