当前位置:   article > 正文

OpenVino --- C++ 推理流程_person-vehicle-bike-detection-2002.xml

person-vehicle-bike-detection-2002.xml

 

  • openvino 环境配置相关

  • C++案例流程


openvino环境匹配

  1. openvino 2021.3.394
  2. vs2019
  3. cmake 3.20.2
  4. python 3.6-3.9 (用于转换其他框架模型到IR, 也支持python推理)

系统环境说明

安装好openvino后主要配置好系统环境变量IE_DIR、ngraph_DIR、TBB_DIR、OPENCV_DIR(openvino自带)以及相关lib库-bin目录;

 

python环境说明

单独创建一个独立的python虚拟环境,直接将openvino自带的一些环境复制到site-packages中,不用按照网上所述的需要pip安装一些openvino包,也不需要添加"PYTHONPATH"系统环境变量。

不用担心相关文件找不到,python 导包的搜索顺序:

  1. 当前目录
  2. 环境变量PYTHONPATH中的目录
  3. Python安装目录第三方库(for Linux OS:/usr/local/lib/python)(win: site-packages下)

C++推理流程

以Intel官方提供的person-vehicle-bike-detection-2002模型为例:

这里将使用官方提供的FP32模型,上图是模型需要的输入以及输出结果。

  1. #include <inference_engine.hpp>
  2. #include <opencv2/opencv.hpp>
  3. #include <opencv2/dnn.hpp>
  4. #include <iostream>
  5. #include "func.h"
  6. using namespace std;
  7. using namespace cv;
  8. using namespace cv::dnn;
  9. using namespace InferenceEngine;
  10. void test_demo() {
  11. // crerate IE engine, find supported device name
  12. Core ie;
  13. vector<string> availableDevices = ie.GetAvailableDevices();
  14. for (int i = 0; i < availableDevices.size(); i++) {
  15. printf("supported device name : %s \n", availableDevices[i].c_str());
  16. }
  17. // load model
  18. auto network = ie.ReadNetwork("D:/download/intel/person-vehicle-bike-detection-2002/FP32/person-vehicle- bike-detection-2002.xml", "D:/download/intel/person-vehicle-bike-detection-2002/FP32/person-vehicle- bike-detection-2002.bin");
  19. // request network input and output info
  20. InferenceEngine::InputsDataMap input_info(network.getInputsInfo());
  21. InferenceEngine::OutputsDataMap output_info(network.getOutputsInfo());
  22. // set input format
  23. for (auto& item : input_info) {
  24. auto input_data = item.second;
  25. input_data->setPrecision(Precision::FP32);
  26. input_data->setLayout(Layout::NCHW);
  27. input_data->getPreProcess().setResizeAlgorithm(RESIZE_BILINEAR);
  28. input_data->getPreProcess().setColorFormat(ColorFormat::RGB);
  29. }
  30. // set output format
  31. for (auto& item : output_info) {
  32. auto output_data = item.second;
  33. output_data->setPrecision(Precision::FP32);
  34. }
  35. auto executable_network = ie.LoadNetwork(network, "GPU");
  36. // output result
  37. vector<Rect> boxes;
  38. vector<int> classIds;
  39. vector<float> confidences;
  40. // request infer
  41. auto infer_request = executable_network.CreateInferRequest();
  42. Mat src = imread("F:/Downloads/opencv_tutorial_data/images/objects.jpg");
  43. int image_height = src.rows;
  44. int image_width = src.cols;
  45. /** Iterating over all input blobs **/
  46. for (auto& item : input_info) {
  47.        auto input_name = item.first;
  48.        /** Getting input blob **/
  49.        auto input = infer_request.GetBlob(input_name);
  50.        size_t num_channels = input->getTensorDesc().getDims()[1];
  51.        size_t h = input->getTensorDesc().getDims()[2];
  52.        size_t w = input->getTensorDesc().getDims()[3];
  53.        size_t image_size = h * w;
  54.        Mat blob_image;
  55.        resize(src, blob_image, Size(w, h));
  56.        cvtColor(blob_image, blob_image, COLOR_BGR2RGB);
  57.        // NCHW
  58.        float* data = static_cast<float*>(input->buffer());
  59.        for (size_t row = 0; row < h; row++) {
  60.            for (size_t col = 0; col < w; col++) {
  61.                for (size_t ch = 0; ch < num_channels; ch++) {
  62.                    data[image_size * ch + row * w + col] = float(blob_image.at<Vec3b>(row, col)[ch]);
  63.               }
  64.           }
  65.       }
  66. }
  67. int64 start = getTickCount();
  68. // do inference
  69. infer_request.Infer();
  70. for (auto& item : output_info) {
  71. auto output_name = item.first;
  72. printf("output_name : %s \n", output_name.c_str());
  73. // get output blob
  74. auto output = infer_request.GetBlob(output_name);
  75. const float* output_blob = static_cast<PrecisionTrait<Precision::FP32>::value_type*>(output- >buffer());
  76. const SizeVector outputDims = output->getTensorDesc().getDims();
  77. const int out_num = outputDims[2];   // 200
  78. const int out_info = outputDims[3];  // 7
  79. for (int n = 0; n < out_num; n++) {
  80. float conf = output_blob[n * 7 + 2];
  81. if (conf < 0.5) {
  82. continue;
  83. }
  84. int x1 = saturate_cast<int>(output_blob[n * 7 + 3] * image_width);
  85. int y1 = saturate_cast<int>(output_blob[n * 7 + 4] * image_height);
  86. int x2 = saturate_cast<int>(output_blob[n * 7 + 5] * image_width);
  87. int y2 = saturate_cast<int>(output_blob[n * 7 + 6] * image_height);
  88. //label
  89. int label = saturate_cast<int>(output_blob[n * 7 + 1]);
  90. classIds.push_back(label);
  91. confidences.push_back(conf);
  92. boxes.push_back(Rect(x1, y1, x2 - x1, y2 - y1));
  93. }
  94. }
  95. vector<int> indices;
  96. NMSBoxes(boxes, confidences, 0.25, 0.5, indices);
  97. float time = (getTickCount() - start) / getTickFrequency();
  98. printf("time used:%f", time);
  99. for (size_t i = 0; i < indices.size(); ++i)
  100. {
  101. int idx = indices[i];
  102. Rect box = boxes[idx];
  103. rectangle(src, box, Scalar(140, 199, 0), 4, 8, 0);
  104. }
  105. imshow("OpenVINO-test", src);
  106. waitKey(0);
  107. }

最终结果如上图所示(效果一般),推理+后处理一般在30多ms, 在Intel(R) Core(TM) i7-9750H CPU @ 2.60GHz 2.59 GHz上

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/IT小白/article/detail/839793
推荐阅读
相关标签
  

闽ICP备14008679号