
Deploying YOLOv5 on the RK3588 Board (Non-Quantized RKNN Deployment)

1: Prepare an Ubuntu Linux x86_64 host

        a. Install Anaconda

        b. Create a virtual environment with python=3.8 (for example, conda create -n rknn python=3.8; the environment name is up to you)

2: Download rknn-toolkit2

Link

Unzip the downloaded archive with unzip

3: Convert the .pt model to ONNX

This step is typically done with the YOLOv5 repository's own export script (for example, python export.py --weights yolov5n.pt --include onnx); the exact export settings depend on your YOLOv5 version and model.
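As a quick sanity check on the result, the optional sketch below (the file name is a placeholder, and it assumes the onnx Python package is installed) validates the exported file and prints its output tensor names; this matters because the on-board demo's post-processing later consumes three detection-head outputs.

# Optional sanity check on the exported ONNX file.
import onnx

model = onnx.load('yolov5n.onnx')            # placeholder path -- use your exported file
onnx.checker.check_model(model)              # structural validation of the ONNX graph
print([o.name for o in model.graph.output])  # list the output tensor names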

4: Convert the ONNX model to RKNN

        a: cd into the rknn-toolkit2-master/rknn-toolkit2/packages directory

        The cp number in the file names refers to the Python version; if you are not sure which version you have, run python --version

        b: pip install -r requirements_cp38-1.6.0.txt to install the dependencies

        c: pip install rknn_toolkit2-1.4.0_22dcfef4-cp38-cp38-linux_x86_64.whl

        d: in a Python shell, run from rknn.api import RKNN; if it raises no error, the installation succeeded

        e: go to the rknn-toolkit2-master/rknn-toolkit2/examples/onnx/yolov5 directory and edit test.py, replacing the model paths and settings with your own (a sketch of these edits is shown after this list)

        f: after editing, run python test.py; the .rknn model and result.jpg are generated in the current directory
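Below is a minimal sketch of what the edited conversion part of test.py boils down to for the non-quantized RK3588 flow; the file names and the mean/std values are placeholders for illustration, so adjust them to your own model.

# Sketch of the model-conversion part of test.py (non-quantized, RK3588 target).
from rknn.api import RKNN

ONNX_MODEL = 'yolov5n.onnx'          # placeholder: your exported ONNX model
RKNN_MODEL = 'yolov5n_result.rknn'   # placeholder: output .rknn file

rknn = RKNN(verbose=True)

# Preprocessing configuration; target_platform must be 'rk3588' for this board.
rknn.config(mean_values=[[0, 0, 0]], std_values=[[255, 255, 255]], target_platform='rk3588')

ret = rknn.load_onnx(model=ONNX_MODEL)
assert ret == 0, 'load_onnx failed'

# do_quantization=False is the non-quantized path this post uses,
# so no quantization dataset is required.
ret = rknn.build(do_quantization=False)
assert ret == 0, 'build failed'

ret = rknn.export_rknn(RKNN_MODEL)
assert ret == 0, 'export_rknn failed'

rknn.release()

The part of the shipped test.py that runs the converted model and writes result.jpg can be left as it is.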

        

       

5: Deployment on the RK3588

Get the rknpu2 demo code from the official Rockchip repository

Link

Put the video to run inference on (H.264 or H.265 encoded) into the model directory

./rknn_yolov5_video_demo ./model/RK3588/yolov5n_result.rknn ./model/a.mp4 264

(the last argument is 264 for an H.264 video or 265 for an H.265 video)

At this point the inference run is complete.

================================== Update 2024-03-07 ==================================

Changed how rknn_yolov5_demo reads its input: frames are now pulled from a video file with cv::VideoCapture (full main.cc below). Note that cv::imshow additionally requires a display and OpenCV's highgui module.

// Copyright (c) 2021 by Rockchip Electronics Co., Ltd. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

/*-------------------------------------------
                Includes
-------------------------------------------*/
#include <dlfcn.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>

#define _BASETSD_H

#include "RgaUtils.h"
#include "postprocess.h"
#include "rknn_api.h"
#include "preprocess.h"

// OpenCV headers needed for cv::VideoCapture and cv::imshow
// (the original image-only demo gets the rest of OpenCV via preprocess.h).
#include "opencv2/videoio.hpp"
#include "opencv2/highgui.hpp"

#define PERF_WITH_POST 1

/*-------------------------------------------
                Functions
-------------------------------------------*/
static void dump_tensor_attr(rknn_tensor_attr *attr)
{
  std::string shape_str = attr->n_dims < 1 ? "" : std::to_string(attr->dims[0]);
  for (int i = 1; i < attr->n_dims; ++i)
  {
    shape_str += ", " + std::to_string(attr->dims[i]);
  }

  printf(" index=%d, name=%s, n_dims=%d, dims=[%s], n_elems=%d, size=%d, w_stride = %d, size_with_stride=%d, fmt=%s, "
         "type=%s, qnt_type=%s, "
         "zp=%d, scale=%f\n",
         attr->index, attr->name, attr->n_dims, shape_str.c_str(), attr->n_elems, attr->size, attr->w_stride,
         attr->size_with_stride, get_format_string(attr->fmt), get_type_string(attr->type),
         get_qnt_type_string(attr->qnt_type), attr->zp, attr->scale);
}

double __get_us(struct timeval t) { return (t.tv_sec * 1000000 + t.tv_usec); }

static unsigned char *load_data(FILE *fp, size_t ofst, size_t sz)
{
  unsigned char *data;
  int ret;

  data = NULL;

  if (NULL == fp)
  {
    return NULL;
  }

  ret = fseek(fp, ofst, SEEK_SET);
  if (ret != 0)
  {
    printf("blob seek failure.\n");
    return NULL;
  }

  data = (unsigned char *)malloc(sz);
  if (data == NULL)
  {
    printf("buffer malloc failure.\n");
    return NULL;
  }
  ret = fread(data, 1, sz, fp);
  return data;
}

static unsigned char *load_model(const char *filename, int *model_size)
{
  FILE *fp;
  unsigned char *data;

  fp = fopen(filename, "rb");
  if (NULL == fp)
  {
    printf("Open file %s failed.\n", filename);
    return NULL;
  }

  fseek(fp, 0, SEEK_END);
  int size = ftell(fp);

  data = load_data(fp, 0, size);

  fclose(fp);

  *model_size = size;
  return data;
}

static int saveFloat(const char *file_name, float *output, int element_size)
{
  FILE *fp;
  fp = fopen(file_name, "w");
  for (int i = 0; i < element_size; i++)
  {
    fprintf(fp, "%.6f\n", output[i]);
  }
  fclose(fp);
  return 0;
}

/*-------------------------------------------
                Main Functions
-------------------------------------------*/
int main(int argc, char **argv)
{
  if (argc < 2)
  {
    printf("Usage: %s <rknn model> <input_image_path> <resize/letterbox> <output_image_path>\n", argv[0]);
    return -1;
  }

  int ret;
  rknn_context ctx;
  size_t actual_size = 0;
  int img_width = 0;
  int img_height = 0;
  int img_channel = 0;
  const float nms_threshold = NMS_THRESH;      // default NMS threshold
  const float box_conf_threshold = BOX_THRESH; // default confidence threshold
  struct timeval start_time, stop_time;
  char *model_name = (char *)argv[1];
  // char *input_path = argv[2];
  std::string option = "letterbox";
  std::string out_path = "./out.jpg";
  if (argc >= 4)
  {
    option = argv[3];
  }
  if (argc >= 5)
  {
    out_path = argv[4];
  }

  // init rga context
  rga_buffer_t src;
  rga_buffer_t dst;
  memset(&src, 0, sizeof(src));
  memset(&dst, 0, sizeof(dst));

  printf("post process config: box_conf_threshold = %.2f, nms_threshold = %.2f\n", box_conf_threshold, nms_threshold);

  /* Create the neural network */
  printf("Loading mode...\n");
  int model_data_size = 0;
  unsigned char *model_data = load_model(model_name, &model_data_size);
  ret = rknn_init(&ctx, model_data, model_data_size, 0, NULL);
  if (ret < 0)
  {
    printf("rknn_init error ret=%d\n", ret);
    return -1;
  }

  rknn_sdk_version version;
  ret = rknn_query(ctx, RKNN_QUERY_SDK_VERSION, &version, sizeof(rknn_sdk_version));
  if (ret < 0)
  {
    printf("rknn_init error ret=%d\n", ret);
    return -1;
  }
  printf("sdk version: %s driver version: %s\n", version.api_version, version.drv_version);

  rknn_input_output_num io_num;
  ret = rknn_query(ctx, RKNN_QUERY_IN_OUT_NUM, &io_num, sizeof(io_num));
  if (ret < 0)
  {
    printf("rknn_init error ret=%d\n", ret);
    return -1;
  }
  printf("model input num: %d, output num: %d\n", io_num.n_input, io_num.n_output);

  rknn_tensor_attr input_attrs[io_num.n_input];
  memset(input_attrs, 0, sizeof(input_attrs));
  for (int i = 0; i < io_num.n_input; i++)
  {
    input_attrs[i].index = i;
    ret = rknn_query(ctx, RKNN_QUERY_INPUT_ATTR, &(input_attrs[i]), sizeof(rknn_tensor_attr));
    if (ret < 0)
    {
      printf("rknn_init error ret=%d\n", ret);
      return -1;
    }
    dump_tensor_attr(&(input_attrs[i]));
  }

  rknn_tensor_attr output_attrs[io_num.n_output];
  memset(output_attrs, 0, sizeof(output_attrs));
  for (int i = 0; i < io_num.n_output; i++)
  {
    output_attrs[i].index = i;
    ret = rknn_query(ctx, RKNN_QUERY_OUTPUT_ATTR, &(output_attrs[i]), sizeof(rknn_tensor_attr));
    dump_tensor_attr(&(output_attrs[i]));
  }

  int channel = 3;
  int width = 0;
  int height = 0;
  if (input_attrs[0].fmt == RKNN_TENSOR_NCHW)
  {
    printf("model is NCHW input fmt\n");
    channel = input_attrs[0].dims[1];
    height = input_attrs[0].dims[2];
    width = input_attrs[0].dims[3];
  }
  else
  {
    printf("model is NHWC input fmt\n");
    height = input_attrs[0].dims[1];
    width = input_attrs[0].dims[2];
    channel = input_attrs[0].dims[3];
  }
  printf("model input height=%d, width=%d, channel=%d\n", height, width, channel);

  rknn_input inputs[1];
  memset(inputs, 0, sizeof(inputs));
  inputs[0].index = 0;
  inputs[0].type = RKNN_TENSOR_UINT8;
  inputs[0].size = width * height * channel;
  inputs[0].fmt = RKNN_TENSOR_NHWC;
  inputs[0].pass_through = 0;

  // Read frames from a video file instead of a single image.
  // printf("Read %s ...\n", input_path);
  cv::VideoCapture cap("/home/pi/rknpu2-master/examples/rknn_yolov5_demo/model/sample.mp4");
  cv::Mat frame;
  printf("start reading video frames\n");
  while (cap.read(frame))
  {
    // cv::imshow("frame", frame);
    cv::Mat orig_img = frame.clone();
    // cv::Mat orig_img = cv::imread(input_path, 1);
    if (!orig_img.data)
    {
      printf("read frame fail!\n");
      return -1;
    }
    cv::Mat img;
    cv::cvtColor(orig_img, img, cv::COLOR_BGR2RGB);
    img_width = img.cols;
    img_height = img.rows;
    printf("img width = %d, img height = %d\n", img_width, img_height);

    // Target size and preprocessing method; letterbox is used by default.
    BOX_RECT pads;
    memset(&pads, 0, sizeof(BOX_RECT));
    cv::Size target_size(width, height);
    cv::Mat resized_img(target_size.height, target_size.width, CV_8UC3);
    // scale factors from the original frame to the model input size
    float scale_w = (float)target_size.width / img.cols;
    float scale_h = (float)target_size.height / img.rows;

    if (img_width != width || img_height != height)
    {
      // plain resize is accelerated with RGA
      if (option == "resize")
      {
        printf("resize image by rga\n");
        ret = resize_rga(src, dst, img, resized_img, target_size);
        if (ret != 0)
        {
          fprintf(stderr, "resize with rga error\n");
          return -1;
        }
        // save the preprocessed image
        cv::imwrite("resize_input.jpg", resized_img);
      }
      else if (option == "letterbox")
      {
        printf("resize image with letterbox\n");
        float min_scale = std::min(scale_w, scale_h);
        scale_w = min_scale;
        scale_h = min_scale;
        letterbox(img, resized_img, pads, min_scale, target_size);
        // save the preprocessed image
        cv::imwrite("letterbox_input.jpg", resized_img);
      }
      else
      {
        fprintf(stderr, "Invalid resize option. Use 'resize' or 'letterbox'.\n");
        return -1;
      }
      inputs[0].buf = resized_img.data;
    }
    else
    {
      inputs[0].buf = img.data;
    }

    gettimeofday(&start_time, NULL);
    rknn_inputs_set(ctx, io_num.n_input, inputs);

    rknn_output outputs[io_num.n_output];
    memset(outputs, 0, sizeof(outputs));
    for (int i = 0; i < io_num.n_output; i++)
    {
      outputs[i].want_float = 0;
    }

    // run inference
    ret = rknn_run(ctx, NULL);
    ret = rknn_outputs_get(ctx, io_num.n_output, outputs, NULL);
    gettimeofday(&stop_time, NULL);
    printf("once run use %f ms\n", (__get_us(stop_time) - __get_us(start_time)) / 1000);

    // post-processing
    detect_result_group_t detect_result_group;
    std::vector<float> out_scales;
    std::vector<int32_t> out_zps;
    for (int i = 0; i < io_num.n_output; ++i)
    {
      out_scales.push_back(output_attrs[i].scale);
      out_zps.push_back(output_attrs[i].zp);
    }
    post_process((int8_t *)outputs[0].buf, (int8_t *)outputs[1].buf, (int8_t *)outputs[2].buf, height, width,
                 box_conf_threshold, nms_threshold, pads, scale_w, scale_h, out_zps, out_scales, &detect_result_group);

    // draw boxes and confidences
    char text[256];
    for (int i = 0; i < detect_result_group.count; i++)
    {
      detect_result_t *det_result = &(detect_result_group.results[i]);
      sprintf(text, "%s %.1f%%", det_result->name, det_result->prop * 100);
      printf("%s @ (%d %d %d %d) %f\n", det_result->name, det_result->box.left, det_result->box.top,
             det_result->box.right, det_result->box.bottom, det_result->prop);
      int x1 = det_result->box.left;
      int y1 = det_result->box.top;
      int x2 = det_result->box.right;
      int y2 = det_result->box.bottom;
      rectangle(orig_img, cv::Point(x1, y1), cv::Point(x2, y2), cv::Scalar(256, 0, 0, 256), 3);
      putText(orig_img, text, cv::Point(x1, y1 + 12), cv::FONT_HERSHEY_SIMPLEX, 0.4, cv::Scalar(255, 255, 255));
    }

    printf("save detect result to %s\n", out_path.c_str());
    imwrite(out_path, orig_img);
    imshow("aaa", orig_img);
    cv::waitKey(1);

    ret = rknn_outputs_release(ctx, io_num.n_output, outputs);

    // timing statistics
    // int test_count = 10;
    // gettimeofday(&start_time, NULL);
    // for (int i = 0; i < test_count; ++i)
    // {
    //   rknn_inputs_set(ctx, io_num.n_input, inputs);
    //   ret = rknn_run(ctx, NULL);
    //   ret = rknn_outputs_get(ctx, io_num.n_output, outputs, NULL);
    // #if PERF_WITH_POST
    //   post_process((int8_t *)outputs[0].buf, (int8_t *)outputs[1].buf, (int8_t *)outputs[2].buf, height, width,
    //                box_conf_threshold, nms_threshold, pads, scale_w, scale_h, out_zps, out_scales, &detect_result_group);
    // #endif
    //   ret = rknn_outputs_release(ctx, io_num.n_output, outputs);
    // }
    // gettimeofday(&stop_time, NULL);
    // printf("loop count = %d , average run %f ms\n", test_count,
    //        (__get_us(stop_time) - __get_us(start_time)) / 1000.0 / test_count);

    // deinitPostProcess();

    // release
    // ret = rknn_destroy(ctx);
    // if (model_data)
    // {
    //   free(model_data);
    // }
  }
  return 0;
}
