当前位置:   article > 正文

静默活体检测+人脸检测+人脸识别结合在NCNN模型下的推理(Windows下的VS环境)_ncnn人脸识别

ncnn人脸识别

 前言:

涉及三个模型:静默活体检测模型 <2M,人脸检测模型 <2M,人脸识别模型 <5M(均为模型文件大小)。

至于 NCNN 不必多说:全 C++ 实现,不依赖第三方库,在移动端 CPU 上推理速度很快。

首先,这是三者结合的推理,这意味着从训练到转ncnn模型全部完成且作用效果的精度达到了不错的要求。

训练在此就省略了,复现的都是 GitHub 上的原项目,然后使用其最终模型进行 C++ 上的推理。

实现的功能:

一个demo实现对单个人的识别。(因为未涉及C++的太多知识,比如通过序列化对象转储到文件并从文件中读取,或者是通过连接数据库进行数据的转储,所以测试的推理的整个流程针对单人。)

步骤:先进行静默活体检测->人脸检测->人脸识别。后面的步骤都是在前面的步骤上实现,识别完后进行静默活体检测开始循环反复。

main函数

  1. #include <stdio.h>
  2. #include <algorithm>
  3. #include <vector>
  4. #include "platform.h"
  5. #include <opencv2/opencv.hpp>
  6. #include <opencv2/core/core.hpp>
  7. #include <opencv2/highgui/highgui.hpp>
  8. #include "iostream"
  9. #include<sstream>
  10. #include "net.h"
  11. #if NCNN_VULKAN
  12. #include "gpu.h"
  13. #endif // NCNN_VULKAN
#define IMAGE_SIZE 80
// NOTE(review): this redefines OpenCV's CV_RGB macro in terms of the legacy
// cvScalar, with components stored in BGR order — confirm it is intentional
// (it appears unused in this translation unit).
#define CV_RGB(r,g,b) cvScalar((b),(g),(r),0)
/* Forward declarations */
bool JudgeReal(cv::Mat bgr, ncnn::Extractor ex, ncnn::Net & SilentFaceSpoofing); // silent liveness check: true = real face
cv::Mat GetMyMat(cv::Mat img, const std::string& s);// draw the cached face box/landmarks and label it with s
cv::Mat GetMat(const std::string & path);// detect and crop a 112x112 face from an image file
cv::Mat GetMat(cv::Mat img);// overload: detect and crop a 112x112 face from an in-memory frame
float* getFeatByMobileFaceNetNCNN(cv::Mat img);// heap-allocated feature vector — caller must delete[]
float CalcSimilarity_1(float* fc1,// cosine similarity of two feature vectors
float* fc2,
long dim);
  25. std::string Convert(float Num)
  26. {
  27. std:: ostringstream oss;
  28. oss << Num;
  29. std:: string str(oss.str());
  30. return str;
  31. }
  32. int main(){
  33. /*
  34. 先加载一个人的图片及其特征向量
  35. */
  36. cv::Mat sample = GetMat("./pic/yzh1.jpg");
  37. float *sam= getFeatByMobileFaceNetNCNN( sample);
  38. /*
  39. 加载活体检测模型
  40. */
  41. static ncnn::Net SilentFaceSpoofing;
  42. SilentFaceSpoofing.load_param("./model/2.7_80x80_MiniFASNetV2_sim.param");
  43. SilentFaceSpoofing.load_model("./model/2.7_80x80_MiniFASNetV2_sim.bin");
  44. static ncnn::Extractor ex = SilentFaceSpoofing.create_extractor();
  45. //打开摄像头
  46. cv::VideoCapture capture(0);//创建VideoCapture对象
  47. if (!capture.isOpened())//判断是否打开摄像头,打开isOpened返回ture
  48. return 1;
  49. bool stop(false);//定义一个用来停止循环的变量
  50. cv::Mat frame;//用来存放读取的视频序列,承载每一帧的图像 ,Mat类是用于保存图像以及其他矩阵数据的数据结构
  51. cv::namedWindow("Camera");//创建一个窗口,显示每一帧的窗口
  52. while (!stop)
  53. {
  54. if (!capture.read(frame))//如果没有读取到就中断
  55. {
  56. break;
  57. }
  58. bool result;
  59. result = JudgeReal(frame, ex, SilentFaceSpoofing);//判断真假脸
  60. if (result) {//活体
  61. cv::Mat frame2= GetMat(frame);//获取人脸框
  62. float * fr= getFeatByMobileFaceNetNCNN(frame2);
  63. float res= CalcSimilarity_1(fr, sam, 112);
  64. std::cout << "相识度:" << res << std::endl;
  65. delete[]fr;
  66. if (res > 0.3) { //认为是同一个人
  67. std::string name = "yzh-sim:" + Convert(res);
  68. cv::Mat imgs = GetMyMat(frame, name);
  69. putText(imgs, "realface", cv::Point(0, 100), cv::FONT_HERSHEY_SIMPLEX, 2, cv::Scalar(123, 0, 255), 4, 4);
  70. cv::imshow("Camera", imgs);//正常显示,把获取的视频填充到窗口中
  71. }
  72. else
  73. {
  74. cv::Mat imgs = GetMyMat(frame, "unknow");
  75. putText(imgs, "unkonwrealface", cv::Point(0, 100), cv::FONT_HERSHEY_SIMPLEX, 2, cv::Scalar(123, 0, 255), 4, 4);
  76. cv::imshow("Camera", imgs);//正常显示,把获取的视频填充到窗口中
  77. }
  78. }
  79. else {
  80. putText(frame, "FalseFace", cv::Point(0, 100), cv::FONT_HERSHEY_SIMPLEX, 2, cv::Scalar(123, 0, 255), 4, 4);
  81. cv::imshow("Camera", frame);//正常显示,把获取的视频填充到窗口中
  82. std::cout << result << std::endl;
  83. }
  84. char c = cvWaitKey(33);
  85. if (c == 32)break; //使用空格键来停止ASCII 为32 关闭摄像头
  86. }
  87. capture.release();//释放
  88. return 0;
  89. }

获取人的特征向量以及比较人的相似度(mobilefacenet 网络实现识别)

  1. #include<iostream>
  2. #include "net.h"
  3. #include <opencv2/opencv.hpp>
using namespace std;
// Load-once guard for the MobileFaceNet model in getFeatByMobileFaceNetNCNN.
static int flag = 0;
// MobileFaceNet feature extraction and similarity helpers.
  7. float simd_dot_1(const float* x, const float* y, const long& len) {
  8. float inner_prod = 0.0f;
  9. __m128 X, Y; // 128-bit values
  10. __m128 acc = _mm_setzero_ps(); // set to (0, 0, 0, 0)
  11. float temp[4];
  12. long i;
  13. for (i = 0; i + 4 < len; i += 4) {
  14. X = _mm_loadu_ps(x + i); // load chunk of 4 floats
  15. Y = _mm_loadu_ps(y + i);
  16. acc = _mm_add_ps(acc, _mm_mul_ps(X, Y));
  17. }
  18. _mm_storeu_ps(&temp[0], acc); // store acc into an array
  19. inner_prod = temp[0] + temp[1] + temp[2] + temp[3];
  20. // add the remaining values
  21. for (; i < len; ++i) {
  22. inner_prod += x[i] * y[i];
  23. }
  24. return inner_prod;
  25. }
  26. float CalcSimilarity_1( float* fc1,
  27. float* fc2,
  28. long dim) {
  29. return simd_dot_1(fc1, fc2, dim)
  30. / (sqrt(simd_dot_1(fc1, fc1, dim))
  31. * sqrt(simd_dot_1(fc2, fc2, dim)));
  32. }
  33. float* getFeatByMobileFaceNetNCNN( cv::Mat img)
  34. {
  35. static ncnn::Net Recognet;
  36. if (!flag) {
  37. Recognet.load_param("./model/mobilefacenet.param");
  38. Recognet.load_model("./model/mobilefacenet.bin");
  39. }
  40. ncnn::Extractor ex= Recognet.create_extractor();
  41. ex.set_light_mode(true);
  42. ex.set_num_threads(4);
  43. //cout << "getFeatByMobileFaceNetNCNN" << endl;
  44. float* feat = new float[512];
  45. ncnn::Mat in = ncnn::Mat::from_pixels_resize(img.data, ncnn::Mat::PIXEL_BGR, img.cols, img.rows, 112, 112);
  46. ex.input("data", in);
  47. ncnn::Mat out;
  48. ex.extract("fc1", out);
  49. //std::vector<float> cls_scores;
  50. for (int j = 0; j < out.w; j++)
  51. {
  52. feat[j] = out[j];
  53. }
  54. return feat;
  55. }

静默活体识别引用函数(实现预处理)

  1. #include <stdio.h>
  2. #include <algorithm>
  3. #include <vector>
  4. #include <opencv2/core/core.hpp>
  5. #include <opencv2/highgui/highgui.hpp>
  6. #include "iostream"
  7. #include "platform.h"
  8. #include "net.h"
  9. #if NCNN_VULKAN
  10. #include "gpu.h"
  11. #endif // NCNN_VULKAN
  12. #define IMAGE_SIZE 80
  13. bool JudgeReal(cv::Mat bgr, ncnn::Extractor ex, ncnn::Net& SilentFaceSpoofing) {
  14. std::vector<float> cls_scores;
  15. ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, IMAGE_SIZE, IMAGE_SIZE);
  16. fprintf(stderr, "input shape: %d %d %d %d\n", in.dims, in.h, in.w, in.c);
  17. ex.input("input", in);//input 是 .param文件中输入节点名称
  18. ncnn::Mat out;
  19. ex.extract("output", out);
  20. {
  21. ncnn::Layer* softmax = ncnn::create_layer("Softmax");
  22. ncnn::ParamDict pb;
  23. softmax->load_param(pb);
  24. softmax->forward_inplace(out, SilentFaceSpoofing.opt);
  25. delete softmax;
  26. }
  27. out = out.reshape(out.h * out.w * out.c);
  28. fprintf(stderr, "output shape: %d %d %d %d\n", out.dims, out.h, out.w, out.c);
  29. cls_scores.resize(out.w);
  30. for (int j = 0; j < out.w; j++)
  31. {
  32. cls_scores[j] = out[j];
  33. printf("cls_scores[%d]=%f\n", j, cls_scores[j]);
  34. }
  35. std::cout << std::endl;
  36. auto itMax = max_element(cls_scores.begin(), cls_scores.end());
  37. std::cout << "the max:" << *itMax << " the location:" << distance(cls_scores.begin(), itMax) << std::endl;
  38. if (distance(cls_scores.begin(), itMax) == 1) {
  39. std::cout << "Image " << distance(cls_scores.begin(), itMax) << " is Real Face. Score: " << *itMax << std::endl;
  40. return true;
  41. }
  42. else {
  43. std::cout << "Image " << distance(cls_scores.begin(), itMax) << " is Fake Face. Score: " << *itMax << std::endl;
  44. return false;
  45. }
  46. }

 获取图像中的人脸框及其特征点 (retinaface网络实现检测,这里的boxsize是图片中检测的人脸个数,由于本次demo只针对单人,所以返回的总是最后一个人的检测框)

  1. #include <stdio.h>
  2. #include <algorithm>
  3. #include <vector>
  4. #include <opencv2/core/core.hpp>
  5. #include <opencv2/highgui/highgui.hpp>
  6. #include <opencv2/opencv.hpp>
  7. #include <fstream>
  8. #include "FaceDetector.h"
using namespace std;
// Last face box found by GetMat(); read later by GetMyMat() to draw the overlay.
// NOTE(review): file-scope mutable state — only valid for this single-face demo.
static bbox mybox;
/*
 * Image preprocessing: detect a face in the input and
 * return the cropped 112x112 face image.
 */
  16. cv::Mat GetMat(const std::string & path)
  17. {
  18. string imgPath = path;
  19. static string param = "./model/face.param";
  20. static string bin = "./model/face.bin";
  21. static const int max_side = 320;
  22. static Detector detector(param, bin, false);
  23. // retinaface
  24. // Detector detector(param, bin, true);
  25. Timer timer;
  26. cv::Mat img = cv::imread(imgPath.c_str(), CV_LOAD_IMAGE_COLOR);
  27. // scale
  28. float long_side = max(img.cols, img.rows);
  29. float scale = max_side/long_side;
  30. cv::Mat img_scale;
  31. cv::Size size = cv::Size(img.cols*scale, img.rows*scale);
  32. cv::resize(img, img_scale, cv::Size(img.cols*scale, img.rows*scale));
  33. if (img.empty())
  34. {
  35. fprintf(stderr, "cv::imread %s failed\n", imgPath.c_str());
  36. }
  37. std::vector<bbox> boxes;
  38. timer.tic();
  39. detector.Detect(img_scale, boxes);
  40. timer.toc("----total timer:");
  41. if (!boxes.size()) {
  42. cv::Mat temp;
  43. cv::resize(img, temp, cv::Size(112, 112));
  44. return temp;
  45. }
  46. cv::Mat temps;
  47. // draw image 裁剪出图片中人脸框
  48. for (int j = 0; j < boxes.size(); ++j) {
  49. cv::Rect rect(boxes[j].x1 / scale, boxes[j].y1 / scale, boxes[j].x2 / scale - boxes[j].x1 / scale, boxes[j].y2 / scale - boxes[j].y1 / scale);
  50. cv::rectangle(img, rect, cv::Scalar(0, 0, 255), 1, 8, 0);
  51. // char test[80];
  52. cv::Mat m = img(rect);
  53. cv::resize(m, temps, cv::Size(112, 112));
  54. mybox = boxes[j];
  55. }
  56. return temps;
  57. }
  58. cv::Mat GetMat( cv::Mat img)
  59. {
  60. static string param = "./model/face.param";
  61. static string bin = "./model/face.bin";
  62. static const int max_side = 320;
  63. static Detector detector(param, bin, false);
  64. // retinaface
  65. // Detector detector(param, bin, true);
  66. Timer timer;
  67. // scale
  68. float long_side = max(img.cols, img.rows);
  69. float scale = max_side / long_side;
  70. cv::Mat img_scale;
  71. cv::Size size = cv::Size(img.cols * scale, img.rows * scale);
  72. cv::resize(img, img_scale, cv::Size(img.cols * scale, img.rows * scale));
  73. if (img.empty())
  74. {
  75. std::cout << "empty img" << std::endl;
  76. }
  77. std::vector<bbox> boxes;
  78. timer.tic();
  79. detector.Detect(img_scale, boxes);
  80. timer.toc("----total timer:");
  81. if (!boxes.size()) {
  82. cv::Mat temp;
  83. cv::resize(img, temp, cv::Size(112, 112));
  84. return temp;
  85. }
  86. cv::Mat temps;
  87. // draw image 裁剪出图片中人脸框
  88. for (int j = 0; j < boxes.size(); ++j) {
  89. cv::Rect rect(boxes[j].x1 / scale, boxes[j].y1 / scale, boxes[j].x2 / scale - boxes[j].x1 / scale, boxes[j].y2 / scale - boxes[j].y1 / scale);
  90. cv::rectangle(img, rect, cv::Scalar(0, 0, 255), 1, 8, 0);
  91. // char test[80];
  92. cv::Mat m = img(rect);
  93. cv::resize(m, temps, cv::Size(112, 112));
  94. mybox = boxes[j];
  95. }
  96. return temps;
  97. }
  98. /*
  99. 获取图像框
  100. */
  101. cv::Mat GetMyMat( cv::Mat img, const std::string &s) {
  102. float long_side = max(img.cols, img.rows);
  103. float scale = 320 / long_side;
  104. cv::Rect rect(mybox.x1 / scale, mybox.y1 / scale, mybox.x2 / scale - mybox.x1 / scale, mybox.y2 / scale - mybox.y1 / scale);
  105. cv::rectangle(img, rect, cv::Scalar(0, 0, 255), 3, 8, 0);
  106. cv::putText(img, s, cv::Size((mybox.x1 / scale), mybox.y1 / scale), cv::FONT_HERSHEY_COMPLEX, 0.5, cv::Scalar(0, 255, 255));
  107. cv::circle(img, cv::Point(mybox.point[0]._x / scale, mybox.point[0]._y / scale), 1, cv::Scalar(0, 0, 225), 4);
  108. cv::circle(img, cv::Point(mybox.point[1]._x / scale, mybox.point[1]._y / scale), 1, cv::Scalar(0, 255, 225), 4);
  109. cv::circle(img, cv::Point(mybox.point[2]._x / scale, mybox.point[2]._y / scale), 1, cv::Scalar(255, 0, 225), 4);
  110. cv::circle(img, cv::Point(mybox.point[3]._x / scale, mybox.point[3]._y / scale), 1, cv::Scalar(0, 255, 0), 4);
  111. cv::circle(img, cv::Point(mybox.point[4]._x / scale, mybox.point[4]._y / scale), 1, cv::Scalar(255, 0, 0), 4);
  112. return img;
  113. }

  1. #include <algorithm>
  2. //#include "omp.h"
  3. #include "FaceDetector.h"
  4. using namespace std;
// Default-construct a detector with the reference thresholds
// (NMS IoU 0.4, confidence 0.6) and BGR mean values {104,117,123};
// the caller must still load a model via Init() before Detect().
Detector::Detector():
        _nms(0.4),
        _threshold(0.6),
        _mean_val{104.f, 117.f, 123.f},
        _retinaface(false),
        Net(new ncnn::Net())
{
}
  13. inline void Detector::Release(){
  14. if (Net != nullptr)
  15. {
  16. delete Net;
  17. Net = nullptr;
  18. }
  19. }
// Construct a detector and immediately load the given model files.
// `retinaface` selects the RetinaFace anchor layout instead of the default one.
Detector::Detector(const std::string &model_param, const std::string &model_bin, bool retinaface):
        _nms(0.4),
        _threshold(0.6),
        _mean_val{104.f, 117.f, 123.f},
        _retinaface(retinaface),
        Net(new ncnn::Net())
{
    Init(model_param, model_bin);
}
  29. void Detector::Init(const std::string &model_param, const std::string &model_bin)
  30. {
  31. int ret = Net->load_param(model_param.c_str());
  32. ret = Net->load_model(model_bin.c_str());
  33. }
// Run face detection on a BGR image.
// Pipeline: mean-subtract -> forward pass (loc / conf / landmark heads) ->
// decode against prior anchors -> sort by score -> NMS -> append to `boxes`.
void Detector::Detect(cv::Mat& bgr, std::vector<bbox>& boxes)
{
    Timer timer;
    timer.tic();
    // Source and destination sizes are equal, so this only converts to ncnn::Mat.
    ncnn::Mat in = ncnn::Mat::from_pixels_resize(bgr.data, ncnn::Mat::PIXEL_BGR, bgr.cols, bgr.rows, bgr.cols, bgr.rows);
    in.substract_mean_normalize(_mean_val, 0);
    timer.toc("precoss:");

    timer.tic();
    ncnn::Extractor ex = Net->create_extractor();
    ex.set_light_mode(true);
    ex.set_num_threads(4);
    ex.input(0, in);
    ncnn::Mat out, out1, out2;
    // loc head: 4 box-regression deltas per anchor
    ex.extract("output0", out);
    // class head: 2 scores per anchor (background, face)
    ex.extract("530", out1);
    // landmark head: 5 (x, y) points = 10 floats per anchor
    ex.extract("529", out2);
    timer.toc("det:");

    std::vector<box> anchor;
    timer.tic();
    if (_retinaface)
        create_anchor_retinaface(anchor, bgr.cols, bgr.rows);
    else
        create_anchor(anchor, bgr.cols, bgr.rows);
    timer.toc("anchor:");

    std::vector<bbox > total_box;
    float *ptr = out.channel(0);     // walks loc output, advanced 4 per anchor
    float *ptr1 = out1.channel(0);   // walks conf output, advanced 2 per anchor
    float *landms = out2.channel(0); // walks landmark output, advanced 10 per anchor
    // #pragma omp parallel for num_threads(2)
    for (int i = 0; i < anchor.size(); ++i)
    {
        // ptr1[1] is the face-class confidence for this anchor.
        if (*(ptr1+1) > _threshold)
        {
            box tmp = anchor[i];
            box tmp1;
            bbox result;
            // Decode center/size deltas; 0.1 and 0.2 are SSD-style variances.
            tmp1.cx = tmp.cx + *ptr * 0.1 * tmp.sx;
            tmp1.cy = tmp.cy + *(ptr+1) * 0.1 * tmp.sy;
            tmp1.sx = tmp.sx * exp(*(ptr+2) * 0.2);
            tmp1.sy = tmp.sy * exp(*(ptr+3) * 0.2);
            // Convert normalized center/size to pixel corners, clamped to the image.
            result.x1 = (tmp1.cx - tmp1.sx/2) * in.w;
            if (result.x1<0)
                result.x1 = 0;
            result.y1 = (tmp1.cy - tmp1.sy/2) * in.h;
            if (result.y1<0)
                result.y1 = 0;
            result.x2 = (tmp1.cx + tmp1.sx/2) * in.w;
            if (result.x2>in.w)
                result.x2 = in.w;
            result.y2 = (tmp1.cy + tmp1.sy/2)* in.h;
            if (result.y2>in.h)
                result.y2 = in.h;
            result.s = *(ptr1 + 1);
            // Decode the five facial landmarks with the same variance scheme.
            for (int j = 0; j < 5; ++j)
            {
                result.point[j]._x =( tmp.cx + *(landms + (j<<1)) * 0.1 * tmp.sx ) * in.w;
                result.point[j]._y =( tmp.cy + *(landms + (j<<1) + 1) * 0.1 * tmp.sy ) * in.h;
            }
            total_box.push_back(result);
        }
        // Advance all three heads to the next anchor.
        ptr += 4;
        ptr1 += 2;
        landms += 10;
    }
    // Highest score first, then suppress overlapping detections.
    std::sort(total_box.begin(), total_box.end(), cmp);
    nms(total_box, _nms);
    printf("%d\n", (int)total_box.size());
    for (int j = 0; j < total_box.size(); ++j)
    {
        boxes.push_back(total_box[j]);
    }
}
  111. inline bool Detector::cmp(bbox a, bbox b) {
  112. if (a.s > b.s)
  113. return true;
  114. return false;
  115. }
// Reset the tunable parameters to their defaults.
// NOTE(review): this sets Net to nullptr WITHOUT deleting it — calling this on
// an already-initialised detector would leak the ncnn::Net. Confirm it is only
// intended to run before initialisation.
inline void Detector::SetDefaultParams(){
    _nms = 0.4;
    _threshold = 0.6;
    _mean_val[0] = 104;
    _mean_val[1] = 117;
    _mean_val[2] = 123;
    Net = nullptr;
}
// Releases the owned ncnn network.
Detector::~Detector(){
    Release();
}
  127. void Detector::create_anchor(std::vector<box> &anchor, int w, int h)
  128. {
  129. // anchor.reserve(num_boxes);
  130. anchor.clear();
  131. std::vector<std::vector<int> > feature_map(4), min_sizes(4);
  132. float steps[] = {8, 16, 32, 64};
  133. for (int i = 0; i < feature_map.size(); ++i) {
  134. feature_map[i].push_back(ceil(h/steps[i]));
  135. feature_map[i].push_back(ceil(w/steps[i]));
  136. }
  137. std::vector<int> minsize1 = {10, 16, 24};
  138. min_sizes[0] = minsize1;
  139. std::vector<int> minsize2 = {32, 48};
  140. min_sizes[1] = minsize2;
  141. std::vector<int> minsize3 = {64, 96};
  142. min_sizes[2] = minsize3;
  143. std::vector<int> minsize4 = {128, 192, 256};
  144. min_sizes[3] = minsize4;
  145. for (int k = 0; k < feature_map.size(); ++k)
  146. {
  147. std::vector<int> min_size = min_sizes[k];
  148. for (int i = 0; i < feature_map[k][0]; ++i)
  149. {
  150. for (int j = 0; j < feature_map[k][1]; ++j)
  151. {
  152. for (int l = 0; l < min_size.size(); ++l)
  153. {
  154. float s_kx = min_size[l]*1.0/w;
  155. float s_ky = min_size[l]*1.0/h;
  156. float cx = (j + 0.5) * steps[k]/w;
  157. float cy = (i + 0.5) * steps[k]/h;
  158. box axil = {cx, cy, s_kx, s_ky};
  159. anchor.push_back(axil);
  160. }
  161. }
  162. }
  163. }
  164. }
  165. void Detector::create_anchor_retinaface(std::vector<box> &anchor, int w, int h)
  166. {
  167. // anchor.reserve(num_boxes);
  168. anchor.clear();
  169. std::vector<std::vector<int> > feature_map(3), min_sizes(3);
  170. float steps[] = {8, 16, 32};
  171. for (int i = 0; i < feature_map.size(); ++i) {
  172. feature_map[i].push_back(ceil(h/steps[i]));
  173. feature_map[i].push_back(ceil(w/steps[i]));
  174. }
  175. std::vector<int> minsize1 = {10, 20};
  176. min_sizes[0] = minsize1;
  177. std::vector<int> minsize2 = {32, 64};
  178. min_sizes[1] = minsize2;
  179. std::vector<int> minsize3 = {128, 256};
  180. min_sizes[2] = minsize3;
  181. for (int k = 0; k < feature_map.size(); ++k)
  182. {
  183. std::vector<int> min_size = min_sizes[k];
  184. for (int i = 0; i < feature_map[k][0]; ++i)
  185. {
  186. for (int j = 0; j < feature_map[k][1]; ++j)
  187. {
  188. for (int l = 0; l < min_size.size(); ++l)
  189. {
  190. float s_kx = min_size[l]*1.0/w;
  191. float s_ky = min_size[l]*1.0/h;
  192. float cx = (j + 0.5) * steps[k]/w;
  193. float cy = (i + 0.5) * steps[k]/h;
  194. box axil = {cx, cy, s_kx, s_ky};
  195. anchor.push_back(axil);
  196. }
  197. }
  198. }
  199. }
  200. }
  201. void Detector::nms(std::vector<bbox> &input_boxes, float NMS_THRESH)
  202. {
  203. std::vector<float>vArea(input_boxes.size());
  204. for (int i = 0; i < int(input_boxes.size()); ++i)
  205. {
  206. vArea[i] = (input_boxes.at(i).x2 - input_boxes.at(i).x1 + 1)
  207. * (input_boxes.at(i).y2 - input_boxes.at(i).y1 + 1);
  208. }
  209. for (int i = 0; i < int(input_boxes.size()); ++i)
  210. {
  211. for (int j = i + 1; j < int(input_boxes.size());)
  212. {
  213. float xx1 = max(input_boxes[i].x1, input_boxes[j].x1);
  214. float yy1 = max(input_boxes[i].y1, input_boxes[j].y1);
  215. float xx2 = min(input_boxes[i].x2, input_boxes[j].x2);
  216. float yy2 = min(input_boxes[i].y2, input_boxes[j].y2);
  217. float w = max(float(0), xx2 - xx1 + 1);
  218. float h = max(float(0), yy2 - yy1 + 1);
  219. float inter = w * h;
  220. float ovr = inter / (vArea[i] + vArea[j] - inter);
  221. if (ovr >= NMS_THRESH)
  222. {
  223. input_boxes.erase(input_boxes.begin() + j);
  224. vArea.erase(vArea.begin() + j);
  225. }
  226. else
  227. {
  228. j++;
  229. }
  230. }
  231. }
  232. }

效果:

真脸

最后:

静默活体检测还存在一定误差。感谢各位大佬的开源,让作为菜鸟的我完成了缝合。

参考:

38、静默活体检测测试及ncnn、mnn部署_sxj731533730的博客-CSDN博客_ncnn和mnn

deepinsight/insightface: State-of-the-art 2D and 3D Face Analysis Project (github.com)

GitHub - biubug6/Pytorch_Retinaface: Retinaface get 80.99% in widerface hard val using mobilenet0.25.

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/从前慢现在也慢/article/detail/284947?site
推荐阅读
相关标签
  

闽ICP备14008679号