
Deploying YOLOv5-Lite on a Raspberry Pi 4B with NCNN

Contents

Preface

I. Setting up NCNN on the Raspberry Pi

1. Install dependencies

2. Download and build NCNN

II. Training a YOLOv5-Lite model

1. Source code

2. Install the required packages

3. Prepare your own dataset (YOLO format)

4. Model training

5. Model export

6. Simplify the ONNX model

III. Deploying the Lite model on the Raspberry Pi

1. Convert the ONNX model to NCNN

2. Add yolov5_lite.cpp

3. Modify eopt.param

4. Modify yolov5_lite.cpp

5. Modify CMakeLists.txt

IV. Final results

Summary


Preface

A record of the whole workflow, so it is easy to repeat next time.


I. Setting up NCNN on the Raspberry Pi

1. Install dependencies

    sudo apt-get install git cmake
    sudo apt-get install -y gfortran
    sudo apt-get install -y libprotobuf-dev libleveldb-dev libsnappy-dev libopencv-dev libhdf5-serial-dev protobuf-compiler
    sudo apt-get install --no-install-recommends libboost-all-dev
    sudo apt-get install -y libgflags-dev libgoogle-glog-dev liblmdb-dev libatlas-base-dev

2. Download and build NCNN

    git clone https://gitee.com/Tencent/ncnn.git
    cd ncnn
    mkdir build
    cd build
    cmake ..
    make -j4
    make install

When the build finishes, the compiled tools and examples are under ncnn/build.


II. Training a YOLOv5-Lite model

1. Source code

https://gitee.com/seaflyren/YOLOv5-Lite


2. Install the required packages

    pip install -r requirements.txt

3. Prepare your own dataset (YOLO format)

Create mydata.yaml under the data folder and copy the contents of coco.yaml into it.

Edit the number of classes nc, the class names, and the train/val paths to match your dataset.

Also change nc in the model yaml (e.g. models/v5lite-e.yaml) so it stays consistent with mydata.yaml. A sketch of such a file is shown below.
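For reference, a minimal mydata.yaml might look like the following sketch. The paths are placeholders, and the two classes mirror the class_names list used later in yolov5_lite.cpp:

    # data/mydata.yaml -- minimal sketch; paths are placeholders
    train: /home/pi/datasets/mydata/images/train
    val: /home/pi/datasets/mydata/images/val

    # number of classes and class names -- must match the model yaml
    nc: 2
    names: ['face', 'face_mask']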

4. Model training

Using lite-e as an example, open a terminal and run:

    python train.py --weights 'path/to/pretrained/v5lite-e.pt' --data 'data/mydata.yaml' --cfg 'models/v5lite-e.yaml' --epochs 300 --batch-size 16 --adam

5. Model export

Export the trained weights to ONNX:

    python export.py --weights 'weights/last.pt' --batch-size 1 --img-size 320

6. Simplify the ONNX model

Use onnx-simplifier to simplify the exported ONNX model:

    pip install onnx-simplifier
    python -m onnxsim last.onnx e.onnx

III. Deploying the Lite model on the Raspberry Pi

1. Convert the ONNX model to NCNN

    cd ncnn/build
    ./tools/onnx/onnx2ncnn e.onnx e.param e.bin
    # optimize the model and store weights as fp16
    ./tools/ncnnoptimize e.param e.bin eopt.param eopt.bin 65536

2. Add yolov5_lite.cpp

    cd ncnn/examples
    touch yolov5_lite.cpp

Copy the following code into the file:

    // Tencent is pleased to support the open source community by making ncnn available.
    //
    // Copyright (C) 2020 THL A29 Limited, a Tencent company. All rights reserved.
    //
    // Licensed under the BSD 3-Clause License (the "License"); you may not use this file except
    // in compliance with the License. You may obtain a copy of the License at
    //
    // https://opensource.org/licenses/BSD-3-Clause
    //
    // Unless required by applicable law or agreed to in writing, software distributed
    // under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR
    // CONDITIONS OF ANY KIND, either express or implied. See the License for the
    // specific language governing permissions and limitations under the License.

    #include "layer.h"
    #include "net.h"

    #if defined(USE_NCNN_SIMPLEOCV)
    #include "simpleocv.h"
    #else
    #include <opencv2/core/core.hpp>
    #include <opencv2/highgui/highgui.hpp>
    #include <opencv2/imgproc/imgproc.hpp>
    #endif
    #include <float.h>
    #include <math.h>
    #include <stdio.h>
    #include <time.h>
    #include <algorithm>
    #include <vector>

    // 0 : FP16
    // 1 : INT8
    #define USE_INT8 0

    // 0 : Image
    // 1 : Camera
    #define USE_CAMERA 1

    struct Object
    {
        cv::Rect_<float> rect;
        int label;
        float prob;
    };

    static inline float intersection_area(const Object& a, const Object& b)
    {
        cv::Rect_<float> inter = a.rect & b.rect;
        return inter.area();
    }

    static void qsort_descent_inplace(std::vector<Object>& faceobjects, int left, int right)
    {
        int i = left;
        int j = right;
        float p = faceobjects[(left + right) / 2].prob;

        while (i <= j)
        {
            while (faceobjects[i].prob > p)
                i++;

            while (faceobjects[j].prob < p)
                j--;

            if (i <= j)
            {
                // swap
                std::swap(faceobjects[i], faceobjects[j]);

                i++;
                j--;
            }
        }

        #pragma omp parallel sections
        {
            #pragma omp section
            {
                if (left < j) qsort_descent_inplace(faceobjects, left, j);
            }
            #pragma omp section
            {
                if (i < right) qsort_descent_inplace(faceobjects, i, right);
            }
        }
    }

    static void qsort_descent_inplace(std::vector<Object>& faceobjects)
    {
        if (faceobjects.empty())
            return;

        qsort_descent_inplace(faceobjects, 0, faceobjects.size() - 1);
    }

    static void nms_sorted_bboxes(const std::vector<Object>& faceobjects, std::vector<int>& picked, float nms_threshold)
    {
        picked.clear();

        const int n = faceobjects.size();

        std::vector<float> areas(n);
        for (int i = 0; i < n; i++)
        {
            areas[i] = faceobjects[i].rect.area();
        }

        for (int i = 0; i < n; i++)
        {
            const Object& a = faceobjects[i];

            int keep = 1;
            for (int j = 0; j < (int)picked.size(); j++)
            {
                const Object& b = faceobjects[picked[j]];

                // intersection over union
                float inter_area = intersection_area(a, b);
                float union_area = areas[i] + areas[picked[j]] - inter_area;
                // float IoU = inter_area / union_area
                if (inter_area / union_area > nms_threshold)
                    keep = 0;
            }

            if (keep)
                picked.push_back(i);
        }
    }

    static inline float sigmoid(float x)
    {
        return static_cast<float>(1.f / (1.f + exp(-x)));
    }

    // inverse sigmoid: maps a probability threshold back into raw-score space
    static inline float unsigmoid(float y)
    {
        return static_cast<float>(-1.0 * (log((1.0 / y) - 1.0)));
    }

    static void generate_proposals(const ncnn::Mat& anchors, int stride, const ncnn::Mat& in_pad,
                                   const ncnn::Mat& feat_blob, float prob_threshold,
                                   std::vector<Object>& objects)
    {
        const int num_grid = feat_blob.h;

        float unsig_pro = 0;
        if (prob_threshold > 0.6)
            unsig_pro = unsigmoid(prob_threshold);

        int num_grid_x;
        int num_grid_y;
        if (in_pad.w > in_pad.h)
        {
            num_grid_x = in_pad.w / stride;
            num_grid_y = num_grid / num_grid_x;
        }
        else
        {
            num_grid_y = in_pad.h / stride;
            num_grid_x = num_grid / num_grid_y;
        }

        const int num_class = feat_blob.w - 5;
        const int num_anchors = anchors.w / 2;

        for (int q = 0; q < num_anchors; q++)
        {
            const float anchor_w = anchors[q * 2];
            const float anchor_h = anchors[q * 2 + 1];

            const ncnn::Mat feat = feat_blob.channel(q);

            for (int i = 0; i < num_grid_y; i++)
            {
                for (int j = 0; j < num_grid_x; j++)
                {
                    const float* featptr = feat.row(i * num_grid_x + j);

                    // find class index with max class score
                    int class_index = 0;
                    float class_score = -FLT_MAX;
                    float box_score = featptr[4];

                    if (prob_threshold > 0.6)
                    {
                        // when prob_threshold > 0.6, comparing the raw box score against
                        // unsigmoid(prob_threshold) skips the sigmoid for most grid cells
                        if (box_score > unsig_pro)
                        {
                            for (int k = 0; k < num_class; k++)
                            {
                                float score = featptr[5 + k];
                                if (score > class_score)
                                {
                                    class_index = k;
                                    class_score = score;
                                }
                            }

                            float confidence = sigmoid(box_score) * sigmoid(class_score);
                            if (confidence >= prob_threshold)
                            {
                                // decode center and size the same way as yolov5's Detect layer
                                float dx = sigmoid(featptr[0]);
                                float dy = sigmoid(featptr[1]);
                                float dw = sigmoid(featptr[2]);
                                float dh = sigmoid(featptr[3]);

                                float pb_cx = (dx * 2.f - 0.5f + j) * stride;
                                float pb_cy = (dy * 2.f - 0.5f + i) * stride;

                                float pb_w = pow(dw * 2.f, 2) * anchor_w;
                                float pb_h = pow(dh * 2.f, 2) * anchor_h;

                                float x0 = pb_cx - pb_w * 0.5f;
                                float y0 = pb_cy - pb_h * 0.5f;
                                float x1 = pb_cx + pb_w * 0.5f;
                                float y1 = pb_cy + pb_h * 0.5f;

                                Object obj;
                                obj.rect.x = x0;
                                obj.rect.y = y0;
                                obj.rect.width = x1 - x0;
                                obj.rect.height = y1 - y0;
                                obj.label = class_index;
                                obj.prob = confidence;

                                objects.push_back(obj);
                            }
                        }
                    }
                    else
                    {
                        for (int k = 0; k < num_class; k++)
                        {
                            float score = featptr[5 + k];
                            if (score > class_score)
                            {
                                class_index = k;
                                class_score = score;
                            }
                        }

                        float confidence = sigmoid(box_score) * sigmoid(class_score);
                        if (confidence >= prob_threshold)
                        {
                            float dx = sigmoid(featptr[0]);
                            float dy = sigmoid(featptr[1]);
                            float dw = sigmoid(featptr[2]);
                            float dh = sigmoid(featptr[3]);

                            float pb_cx = (dx * 2.f - 0.5f + j) * stride;
                            float pb_cy = (dy * 2.f - 0.5f + i) * stride;

                            float pb_w = pow(dw * 2.f, 2) * anchor_w;
                            float pb_h = pow(dh * 2.f, 2) * anchor_h;

                            float x0 = pb_cx - pb_w * 0.5f;
                            float y0 = pb_cy - pb_h * 0.5f;
                            float x1 = pb_cx + pb_w * 0.5f;
                            float y1 = pb_cy + pb_h * 0.5f;

                            Object obj;
                            obj.rect.x = x0;
                            obj.rect.y = y0;
                            obj.rect.width = x1 - x0;
                            obj.rect.height = y1 - y0;
                            obj.label = class_index;
                            obj.prob = confidence;

                            objects.push_back(obj);
                        }
                    }
                }
            }
        }
    }

    static int detect_yolov5(const cv::Mat& bgr, std::vector<Object>& objects)
    {
        ncnn::Net yolov5;

    #if USE_INT8
        yolov5.opt.use_int8_inference = true;
    #else
        yolov5.opt.use_vulkan_compute = true;
        yolov5.opt.use_bf16_storage = true;
    #endif

        // original pretrained model from https://github.com/ultralytics/yolov5
        // the ncnn model https://github.com/nihui/ncnn-assets/tree/master/models
        // change these paths to wherever your converted model files live
    #if USE_INT8
        yolov5.load_param("/home/corvin/Mask/weights/e.param");
        yolov5.load_model("/home/corvin/Mask/weights/e.bin");
    #else
        yolov5.load_param("/home/corvin/Mask/weights/eopt.param");
        yolov5.load_model("/home/corvin/Mask/weights/eopt.bin");
    #endif

        const int target_size = 320;
        const float prob_threshold = 0.60f;
        const float nms_threshold = 0.60f;

        int img_w = bgr.cols;
        int img_h = bgr.rows;

        // letterbox pad to multiple of 32
        int w = img_w;
        int h = img_h;
        float scale = 1.f;
        if (w > h)
        {
            scale = (float)target_size / w;
            w = target_size;
            h = h * scale;
        }
        else
        {
            scale = (float)target_size / h;
            h = target_size;
            w = w * scale;
        }

        ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR2RGB, img_w, img_h, w, h);

        // pad to target_size rectangle
        // yolov5/utils/datasets.py letterbox
        int wpad = (w + 31) / 32 * 32 - w;
        int hpad = (h + 31) / 32 * 32 - h;
        ncnn::Mat in_pad;
        ncnn::copy_make_border(in, in_pad, hpad / 2, hpad - hpad / 2, wpad / 2, wpad - wpad / 2, ncnn::BORDER_CONSTANT, 114.f);

        const float norm_vals[3] = {1 / 255.f, 1 / 255.f, 1 / 255.f};
        in_pad.substract_mean_normalize(0, norm_vals);

        ncnn::Extractor ex = yolov5.create_extractor();

        ex.input("images", in_pad);

        std::vector<Object> proposals;

        // stride 8
        {
            ncnn::Mat out;
            ex.extract("451", out);

            ncnn::Mat anchors(6);
            anchors[0] = 10.f;
            anchors[1] = 13.f;
            anchors[2] = 16.f;
            anchors[3] = 30.f;
            anchors[4] = 33.f;
            anchors[5] = 23.f;

            std::vector<Object> objects8;
            generate_proposals(anchors, 8, in_pad, out, prob_threshold, objects8);

            proposals.insert(proposals.end(), objects8.begin(), objects8.end());
        }

        // stride 16
        {
            ncnn::Mat out;
            ex.extract("479", out);

            ncnn::Mat anchors(6);
            anchors[0] = 30.f;
            anchors[1] = 61.f;
            anchors[2] = 62.f;
            anchors[3] = 45.f;
            anchors[4] = 59.f;
            anchors[5] = 119.f;

            std::vector<Object> objects16;
            generate_proposals(anchors, 16, in_pad, out, prob_threshold, objects16);

            proposals.insert(proposals.end(), objects16.begin(), objects16.end());
        }

        // stride 32
        {
            ncnn::Mat out;
            ex.extract("507", out);

            ncnn::Mat anchors(6);
            anchors[0] = 116.f;
            anchors[1] = 90.f;
            anchors[2] = 156.f;
            anchors[3] = 198.f;
            anchors[4] = 373.f;
            anchors[5] = 326.f;

            std::vector<Object> objects32;
            generate_proposals(anchors, 32, in_pad, out, prob_threshold, objects32);

            proposals.insert(proposals.end(), objects32.begin(), objects32.end());
        }

        // sort all proposals by score from highest to lowest
        qsort_descent_inplace(proposals);

        // apply nms with nms_threshold
        std::vector<int> picked;
        nms_sorted_bboxes(proposals, picked, nms_threshold);

        int count = picked.size();

        objects.resize(count);
        for (int i = 0; i < count; i++)
        {
            objects[i] = proposals[picked[i]];

            // adjust offset to original unpadded image
            float x0 = (objects[i].rect.x - (wpad / 2)) / scale;
            float y0 = (objects[i].rect.y - (hpad / 2)) / scale;
            float x1 = (objects[i].rect.x + objects[i].rect.width - (wpad / 2)) / scale;
            float y1 = (objects[i].rect.y + objects[i].rect.height - (hpad / 2)) / scale;

            // clip
            x0 = std::max(std::min(x0, (float)(img_w - 1)), 0.f);
            y0 = std::max(std::min(y0, (float)(img_h - 1)), 0.f);
            x1 = std::max(std::min(x1, (float)(img_w - 1)), 0.f);
            y1 = std::max(std::min(y1, (float)(img_h - 1)), 0.f);

            objects[i].rect.x = x0;
            objects[i].rect.y = y0;
            objects[i].rect.width = x1 - x0;
            objects[i].rect.height = y1 - y0;
        }

        return 0;
    }

    static void draw_objects(const cv::Mat& bgr, const std::vector<Object>& objects)
    {
        static const char* class_names[] = {
            "face", "face_mask"
        };

        cv::Mat image = bgr.clone();

        for (size_t i = 0; i < objects.size(); i++)
        {
            const Object& obj = objects[i];

            fprintf(stderr, "%d = %.5f at %.2f %.2f %.2f x %.2f\n", obj.label, obj.prob,
                    obj.rect.x, obj.rect.y, obj.rect.width, obj.rect.height);

            cv::rectangle(image, obj.rect, cv::Scalar(0, 255, 0));

            char text[256];
            sprintf(text, "%s %.1f%%", class_names[obj.label], obj.prob * 100);

            int baseLine = 0;
            cv::Size label_size = cv::getTextSize(text, cv::FONT_HERSHEY_SIMPLEX, 0.5, 1, &baseLine);

            int x = obj.rect.x;
            int y = obj.rect.y - label_size.height - baseLine;
            if (y < 0)
                y = 0;
            if (x + label_size.width > image.cols)
                x = image.cols - label_size.width;

            cv::rectangle(image, cv::Rect(cv::Point(x, y), cv::Size(label_size.width, label_size.height + baseLine)),
                          cv::Scalar(255, 255, 255), -1);

            cv::putText(image, text, cv::Point(x, y + label_size.height),
                        cv::FONT_HERSHEY_SIMPLEX, 0.5, cv::Scalar(0, 0, 0));
        }

    #if USE_CAMERA
        cv::imshow("camera", image);
        cv::waitKey(1);
    #else
        cv::imwrite("result.jpg", image);
    #endif
    }

    #if USE_CAMERA
    int main(int argc, char** argv)
    {
        cv::VideoCapture capture;
        capture.open(0); // change this index to select which camera to open
        if (!capture.isOpened())
        {
            fprintf(stderr, "failed to open camera 0\n");
            return -1;
        }

        cv::Mat frame;
        while (true)
        {
            capture >> frame;
            if (frame.empty())
                break;

            std::vector<Object> objects;
            detect_yolov5(frame, objects);

            draw_objects(frame, objects);

            if (cv::waitKey(30) >= 0)
                break;
        }

        return 0;
    }
    #else
    int main(int argc, char** argv)
    {
        if (argc != 2)
        {
            fprintf(stderr, "Usage: %s [imagepath]\n", argv[0]);
            return -1;
        }

        const char* imagepath = argv[1];

        struct timespec begin, end;
        clock_gettime(CLOCK_MONOTONIC, &begin);

        cv::Mat m = cv::imread(imagepath, 1);
        if (m.empty())
        {
            fprintf(stderr, "cv::imread %s failed\n", imagepath);
            return -1;
        }

        std::vector<Object> objects;
        detect_yolov5(m, objects);

        clock_gettime(CLOCK_MONOTONIC, &end);
        // convert the elapsed time to nanoseconds, then print milliseconds
        long elapsed_ns = (end.tv_sec - begin.tv_sec) * 1000000000L + (end.tv_nsec - begin.tv_nsec);
        printf(">> Time : %lf ms\n", (double)elapsed_ns / 1000000);

        draw_objects(m, objects);

        return 0;
    }
    #endif

3. Modify eopt.param

Bug 1: "Squeeze not supported yet!"

If onnx2ncnn prints messages like "Squeeze not supported yet!" while generating the param file, simplify the ONNX model with onnx-simplifier first and then convert it again.

Open eopt.param and change every Reshape layer's shape argument to 0=-1. This lets the network accept dynamic input sizes; see the sketch below.
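For example, a Reshape line would change roughly as follows (the layer and blob names and the original 0=1600 value are illustrative; yours will differ):

    before: Reshape    Reshape_107    1 1 398 399 0=1600 1=7 2=3
    after:  Reshape    Reshape_107    1 1 398 399 0=-1 1=7 2=3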

4. Modify yolov5_lite.cpp

Bug 2: Segmentation Fault

This happens when the output names passed to ex.extract() in the cpp do not match the Permute outputs in the param file.

Open v5lite-e.yaml and make sure the anchors in the cpp match the ones defined there, as in the sketch below.
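For reference, with the default configuration the anchors block looks like this; each row corresponds to one of the three stride blocks already hard-coded in detect_yolov5() above. If your yaml uses different values, update the nine anchor pairs in the cpp accordingly:

    anchors:
      - [10,13, 16,30, 33,23]       # P3/8  -> the "stride 8" block
      - [30,61, 62,45, 59,119]      # P4/16 -> the "stride 16" block
      - [116,90, 156,198, 373,326]  # P5/32 -> the "stride 32" block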

 

Then open eopt.param and update the three ex.extract() output names in the cpp ("451", "479", "507" above) so they match the Permute layer outputs in your param file; these blob names change from one export to the next.
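A quick way to find them is to list the Permute layers; the layer and blob names below are illustrative:

    grep Permute eopt.param
    # Permute    Transpose_200    1 1 394 451 0=1
    # Permute    Transpose_213    1 1 414 479 0=1
    # Permute    Transpose_226    1 1 434 507 0=1

The output blob name is the last name on each line before the 0=... parameters; pass those three names to the ex.extract() calls for the stride 8, 16, and 32 branches respectively.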

5. Modify CMakeLists.txt

Open examples/CMakeLists.txt and add ncnn_add_example(yolov5_lite); the name must match the cpp file name.
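The tail of the file would then look something like this (the neighboring entries come from the stock examples list and may differ between ncnn versions):

    ncnn_add_example(yolov5)
    ncnn_add_example(yolov5_lite)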

Then rebuild with cmake:

    cd ncnn/build
    cmake ..
    make

IV. Final results
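With USE_CAMERA set to 1, the compiled example takes no arguments and opens the camera directly; with USE_CAMERA set to 0 it expects an image path. The binary lands under build/examples after make (the test image name below is a placeholder):

    cd ncnn/build/examples
    ./yolov5_lite            # camera mode (USE_CAMERA 1)
    ./yolov5_lite test.jpg   # image mode (USE_CAMERA 0)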


Summary

After deploying YOLOv5-Lite this way, detection on the Raspberry Pi runs quite smoothly.
