
Code Analysis of the OpenCV Sample opencv-4.5.5\samples\dnn\face_detect.cpp (FaceRecognizerSF::create)


[Figure: call graph of the sample's main function]

[Figure: flowchart of the sample's main function]

[Figure: UML logic diagram of the sample's main function]

The sample source code is as follows:

#include <opencv2/dnn.hpp>

#include <opencv2/imgproc.hpp>

#include <opencv2/highgui.hpp>

#include <opencv2/objdetect.hpp>

#include <iostream>

using namespace cv;

using namespace std;

static

void visualize(Mat& input, int frame, Mat& faces, double fps, int thickness = 2)

{
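    // Each row of 'faces' describes one detection as 15 floats:
    // [0..3] bounding box x, y, w, h; [4..13] five landmark (x, y) pairs
    // (right eye, left eye, nose tip, right and left mouth corner); [14] the confidence score.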

    std::string fpsString = cv::format("FPS : %.2f", (float)fps);

    if (frame >= 0)

        cout << "Frame " << frame << ", ";

    cout << "FPS: " << fpsString << endl;

    for (int i = 0; i < faces.rows; i++)

    {

        // Print results

        cout << "Face " << i

             << ", top-left coordinates: (" << faces.at<float>(i, 0) << ", " << faces.at<float>(i, 1) << "), "左上角坐标

             << "box width箱宽: " << faces.at<float>(i, 2)  << ", box height箱高: " << faces.at<float>(i, 3) << ", "

             << "score分数: " << cv::format("%.2f", faces.at<float>(i, 14))

             << endl;

        // Draw bounding box

        rectangle(input, Rect2i(int(faces.at<float>(i, 0)), int(faces.at<float>(i, 1)), int(faces.at<float>(i, 2)), int(faces.at<float>(i, 3))), Scalar(0, 255, 0), thickness);

        // Draw landmarks

        circle(input, Point2i(int(faces.at<float>(i, 4)), int(faces.at<float>(i, 5))), 2, Scalar(255, 0, 0), thickness);

        circle(input, Point2i(int(faces.at<float>(i, 6)), int(faces.at<float>(i, 7))), 2, Scalar(0, 0, 255), thickness);

        circle(input, Point2i(int(faces.at<float>(i, 8)), int(faces.at<float>(i, 9))), 2, Scalar(0, 255, 0), thickness);

        circle(input, Point2i(int(faces.at<float>(i, 10)), int(faces.at<float>(i, 11))), 2, Scalar(255, 0, 255), thickness);

        circle(input, Point2i(int(faces.at<float>(i, 12)), int(faces.at<float>(i, 13))), 2, Scalar(0, 255, 255), thickness);

    }

    putText(input, fpsString, Point(0, 15), FONT_HERSHEY_SIMPLEX, 0.5, Scalar(0, 255, 0), 2);

}

int main(int argc, char** argv)

{

    CommandLineParser parser(argc, argv,

        "{help  h           |            | Print this message打印此消息}"

        "{image1 i1         |            | Path to the input image1. Omit for detecting through VideoCapture输入图像的路径1。省略用于通过 VideoCapture 检测}"

        "{image2 i2         |            | Path to the input image2. When image1 and image2 parameters given then the program try to find a face on both images and runs face recognition algorithm输入图像2的路径。当给定 image1 image2 参数时,程序会尝试在两个图像上找到人脸并运行人脸识别算法}"

        "{video v           | 0          | Path to the input video输入视频的路径}"

        "{scale sc          | 1.0        | Scale factor used to resize input video frames用于调整输入视频帧大小的比例因子}"

        "{fd_model fd       | yunet.onnx | Path to the model模型路径. Download yunet.onnx in https://github.com/ShiqiYu/libfacedetection.train/tree/master/tasks/task1/onnx }"

        "{fr_model fr       | face_recognizer_fast.onnx | Path to the face recognition model人脸识别模型的路径. Download the model at https://drive.google.com/file/d/1ClK9WiB492c5OZFKveF3XiHCejoOxINW/view}"

        "{score_threshold   | 0.9        | Filter out faces of score过滤掉分数的面孔< score_threshold}"

        "{nms_threshold     | 0.3        | Suppress bounding boxes of iou >= nms_threshold}"

        "{top_k             | 5000       | Keep top_k bounding boxes before NMS}"

        "{save s            | false      | Set true to save results. This flag is invalid when using camera}"

    );

    if (parser.has("help"))

    {

        parser.printMessage();

        return 0;

    }

    String fd_modelPath = parser.get<String>("fd_model");

    String fr_modelPath = parser.get<String>("fr_model");

    float scoreThreshold = parser.get<float>("score_threshold");

    float nmsThreshold = parser.get<float>("nms_threshold");

    int topK = parser.get<int>("top_k");

    bool save = parser.get<bool>("save");

    double cosine_similar_thresh = 0.363;

    double l2norm_similar_thresh = 1.128;
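    // Decision thresholds for the two distance measures used further below:
    // cosine similarity >= 0.363 or NormL2 distance <= 1.128 is treated as "same identity".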

    //! [initialize_FaceDetectorYN]

    // Initialize FaceDetectorYN

    Ptr<FaceDetectorYN> detector = FaceDetectorYN::create(fd_modelPath, "", Size(320, 320), scoreThreshold, nmsThreshold, topK);

    //! [initialize_FaceDetectorYN]
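    // Note: the Size(320, 320) passed to create() is only an initial input size;
    // setInputSize() is called below with the real image/frame size before each detect().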

    TickMeter tm;

    // If input is an image

    if (parser.has("image1"))

    {

        String input1 = parser.get<String>("image1");

        Mat image1 = imread(samples::findFile(input1));

        if (image1.empty())

        {

            std::cerr << "Cannot read image: " << input1 << std::endl;

            return 2;

        }

        tm.start();

        //! [inference]

        // Set input size before inference

        detector->setInputSize(image1.size());

        Mat faces1;

        detector->detect(image1, faces1);
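        // detect() fills 'faces1' with one row per detected face; the check below rejects
        // the image if no face was found.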

        if (faces1.rows < 1)

        {

            std::cerr << "Cannot find a face in " << input1 << std::endl;

            return 1;

        }

        //! [inference]

        tm.stop();

        // Draw results on the input image

        visualize(image1, -1, faces1, tm.getFPS());

        // Save results if save is true

        if (save)

        {

            cout << "Saving result.jpg...\n";

            imwrite("result.jpg", image1);

        }

        // Visualize results

        imshow("image1", image1);

        pollKey();  // handle UI events to show content

        if (parser.has("image2"))

        {

            String input2 = parser.get<String>("image2");

            Mat image2 = imread(samples::findFile(input2));

            if (image2.empty())

            {

                std::cerr << "Cannot read image2: " << input2 << std::endl;

                return 2;

            }

            tm.reset();

            tm.start();

            detector->setInputSize(image2.size());

            Mat faces2;

            detector->detect(image2, faces2);

            if (faces2.rows < 1)

            {

                std::cerr << "Cannot find a face in " << input2 << std::endl;

                return 1;

            }

            tm.stop();

            visualize(image2, -1, faces2, tm.getFPS());

            if (save)

            {

                cout << "Saving result2.jpg...\n";

                imwrite("result2.jpg", image2);

            }

            imshow("image2", image2);

            pollKey();

            //! [initialize_FaceRecognizerSF]

            // Initialize FaceRecognizerSF

            Ptr<FaceRecognizerSF> faceRecognizer = FaceRecognizerSF::create(fr_modelPath, "");

            //! [initialize_FaceRecognizerSF]

            //! [facerecognizer]

            // Align and crop the facial image using the first detected face in each image.

            Mat aligned_face1, aligned_face2;

            faceRecognizer->alignCrop(image1, faces1.row(0), aligned_face1);

            faceRecognizer->alignCrop(image2, faces2.row(0), aligned_face2);
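            // alignCrop() uses the landmarks stored in the detection row to warp each face
            // to the standard input expected by the recognition model.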

            // Run feature extraction with the given aligned faces

            Mat feature1, feature2;

            faceRecognizer->feature(aligned_face1, feature1);

            feature1 = feature1.clone();

            faceRecognizer->feature(aligned_face2, feature2);

            feature2 = feature2.clone();
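            // The clone() calls above are taken because feature() may return a Mat header
            // over the recognizer's internal buffer, which could be reused on the next call.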

            //! [facerecognizer]

            //! [match]

            double cos_score = faceRecognizer->match(feature1, feature2, FaceRecognizerSF::DisType::FR_COSINE);

            double L2_score = faceRecognizer->match(feature1, feature2, FaceRecognizerSF::DisType::FR_NORM_L2);

            //! [match]
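            // FR_COSINE is a similarity (larger means more alike), while FR_NORM_L2 is a
            // distance (smaller means more alike), hence the opposite comparisons below.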

            if (cos_score >= cosine_similar_thresh)

            {

                std::cout << "They have the same identity; 他们有相同的身份";

            }

            else

            {

                std::cout << "They have different identities; 他们有不同的身份";

            }

            std::cout << " Cosine Similarity余弦相似度: " << cos_score << ", threshold: " << cosine_similar_thresh << ". (higher value means higher similarity更高的值意味着更高的相似度, max 1.0)\n";

            if (L2_score <= l2norm_similar_thresh)

            {

                std::cout << "They have the same identity;";

            }

            else

            {

                std::cout << "They have different identities.";

            }

            std::cout << " NormL2 Distance: " << L2_score << ", threshold: " << l2norm_similar_thresh << ". (lower value means higher similarity较低的值意味着较高的相似度, min 0.0)\n";

        }

        cout << "Press any key to exit..." << endl;

        waitKey(0);

    }

    else

    {

        int frameWidth, frameHeight;

        float scale = parser.get<float>("scale");

        VideoCapture capture;

        std::string video = parser.get<string>("video");

        if (video.size() == 1 && isdigit(video[0]))

            capture.open(parser.get<int>("video"));

        else

            capture.open(samples::findFileOrKeep(video));  // keep GStreamer pipelines
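        // A one-character numeric "video" argument (the default "0") is treated as a
        // camera index; anything else is opened as a file path or GStreamer pipeline.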

        if (capture.isOpened())

        {

            frameWidth = int(capture.get(CAP_PROP_FRAME_WIDTH) * scale);

            frameHeight = int(capture.get(CAP_PROP_FRAME_HEIGHT) * scale);

            cout << "Video " << video

                << ": width=" << frameWidth

                << ", height=" << frameHeight

                << endl;

        }

        else

        {

            cout << "Could not initialize video capturing无法初始化视频捕获: " << video << "\n";

            return 1;

        }

        detector->setInputSize(Size(frameWidth, frameHeight));

        cout << "Press 'SPACE' to save frame, any other key to exit..." << endl;

        int nFrame = 0;

        for (;;)

        {

            // Get frame

            Mat frame;

            if (!capture.read(frame))

            {

                cerr << "Can't grab frame! Stop\n";

                break;

            }

            resize(frame, frame, Size(frameWidth, frameHeight));

            // Inference

            Mat faces;

            tm.start();

            detector->detect(frame, faces);

            tm.stop();

            Mat result = frame.clone();

            // Draw results on the input image

            visualize(result, nFrame, faces, tm.getFPS());

            // Visualize results

            imshow("Live", result);

            int key = waitKey(1);

            bool saveFrame = save;

            if (key == ' ')

            {

                saveFrame = true;

                key = 0;  // handled

            }
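            // SPACE saves the current frame without exiting; any other key press leaves
            // 'key > 0', which ends the loop below.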

            if (saveFrame)

            {

                std::string frame_name = cv::format("frame_%05d.png", nFrame);

                std::string result_name = cv::format("result_%05d.jpg", nFrame);

                cout << "Saving '" << frame_name << "' and '" << result_name << "' ...\n";

                imwrite(frame_name, frame);

                imwrite(result_name, result);

            }

            ++nFrame;

            if (key > 0)

                break;

        }

        cout << "Processed " << nFrame << " frames" << endl;

    }

    cout << "Done." << endl;

    return 0;

}
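A usage note, with parameter names taken from the CommandLineParser above: running the compiled sample with both image arguments, e.g. -i1=a.jpg -i2=b.jpg (the two ONNX models must be reachable via the fd_model/fr_model paths), detects one face per image and compares their features with the cosine and NormL2 scores. Running it without image arguments performs live detection on the default camera (-v=0) or on a video source passed with -v, where SPACE saves the current frame and any other key exits.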
