Recall that in this article we mentioned cv::dnn::readNetFromModelOptimizer, which can load deep learning models, but we did not actually use it at the time. This function reads models in the OpenVINO IR format directly, so first let's download a face detection model (the code below uses face-detection-0200).
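Before the full example, here is a minimal sketch (assuming the downloaded .xml/.bin files sit next to the executable, and that your OpenCV build was compiled with OpenVINO / Inference Engine support) that just lists the targets the Inference Engine backend exposes and checks that the IR model loads:

#include <opencv2/dnn.hpp>
#include <iostream>

int main() {
    // Targets usable with the Inference Engine backend; the list is empty
    // if OpenCV was built without OpenVINO support.
    for (auto t : cv::dnn::getAvailableTargets(cv::dnn::DNN_BACKEND_INFERENCE_ENGINE))
        std::cout << "IE target id: " << static_cast<int>(t) << std::endl;

    // readNetFromModelOptimizer throws a cv::Exception if the files cannot be read.
    cv::dnn::Net net = cv::dnn::readNetFromModelOptimizer(
        "./face-detection-0200.xml", "./face-detection-0200.bin");
    std::cout << "model loaded, empty = " << net.empty() << std::endl;
    return 0;
}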
Now we use it directly. It works just like before, except that the model now comes as an .xml file (network topology) plus a .bin file (weights). The code is as follows:
#include <iostream>
#include <opencv2/core.hpp>
#include <opencv2/highgui.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/opencv.hpp>
// The raw Inference Engine headers are not needed when going through cv::dnn:
// #include <inference_engine.hpp>
// #include <ie_extension.h>
// #include <ie_blob.h>

using namespace std;
// using namespace InferenceEngine;
using namespace cv;
using namespace cv::dnn;

string xml = "./face-detection-0200.xml";
string bin = "./face-detection-0200.bin";

int main() {
    Mat src = cv::imread("321.jpg");

    // Load the OpenVINO IR model (xml topology + bin weights) directly.
    Net net = readNetFromModelOptimizer(xml, bin);
    net.setPreferableBackend(DNN_BACKEND_INFERENCE_ENGINE);  // use OpenVINO (Inference Engine) as the inference backend
    net.setPreferableTarget(DNN_TARGET_CPU);

    // Pack the image into an NCHW float blob; swapRB=true converts BGR to RGB,
    // the final argument 5 is CV_32F.
    Mat blob = blobFromImage(src, 1.0, Size(300, 300), Scalar(), true, false, 5);
    net.setInput(blob);

    float confidenceThreshold = 0.5;
    Mat detection = net.forward();

    // getPerfProfile() returns ticks; dividing by (tick frequency / 1000) gives milliseconds.
    vector<double> layerTimings;
    double freq = getTickFrequency() / 1000;
    double time = net.getPerfProfile(layerTimings) / freq;
    cout << "OpenVINO model inference time: " << time << " ms" << endl;

    int h = src.size().height;
    int w = src.size().width;
    // The SSD output is a 1x1xNx7 blob; view it as an Nx7 float matrix.
    cv::Mat detectionMat(detection.size[2], detection.size[3], CV_32F, detection.ptr<float>());
    for (int i = 0; i < detectionMat.rows; i++) {
        float confidence = detectionMat.at<float>(i, 2);
        // cout << confidence << endl;
        if (confidence > confidenceThreshold) {
            int idx = static_cast<int>(detectionMat.at<float>(i, 1));
            // cout << "idx is " << idx << endl;
            // Coordinates are normalized to [0, 1]; scale back to the image size.
            int left   = static_cast<int>(detectionMat.at<float>(i, 3) * w);
            int top    = static_cast<int>(detectionMat.at<float>(i, 4) * h);
            int right  = static_cast<int>(detectionMat.at<float>(i, 5) * w);
            int bottom = static_cast<int>(detectionMat.at<float>(i, 6) * h);
            cv::rectangle(src, Rect(left, top, right - left, bottom - top), Scalar(255, 0, 0), 2);
        }
    }
    cv::imwrite("3.jpg", src);
    return 0;
}
Result: the inference time is printed to the console and the image with the detected face boxes drawn on it is written to 3.jpg.
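The parsing loop above relies on the standard SSD output layout: the forward pass returns a 1x1xNx7 blob in which each row is [image_id, label, confidence, x_min, y_min, x_max, y_max], with the box coordinates normalized to [0, 1]. As a sketch of that decoding step (the helper name decodeDetection is mine, not part of OpenCV), the box math from the loop can be factored out like this:

#include <opencv2/core.hpp>

// Decode one 7-float detection row of the 1x1xNx7 SSD output into a
// pixel-space rectangle for an image of size (w, h).
static cv::Rect decodeDetection(const float* row, int w, int h) {
    int left   = static_cast<int>(row[3] * w);
    int top    = static_cast<int>(row[4] * h);
    int right  = static_cast<int>(row[5] * w);
    int bottom = static_cast<int>(row[6] * h);
    return cv::Rect(left, top, right - left, bottom - top);
}

Inside the loop this would be used as cv::Rect box = decodeDetection(detectionMat.ptr<float>(i), w, h); before the cv::rectangle call.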