Prerequisites:
ncnn notes: 01) Windows + Android Studio: quickly building a deep learning classification model on an Android phone
ncnn notes: 02) A first look at JNI with ncnn
The RetinaFace weights and parameter files officially provided for ncnn are
mnet.25-opt.bin and mnet.25-opt.param
Download: https://github.com/nihui/ncnn-assets/tree/master/models. However, when I tested these weights on Ubuntu, the following error appeared:
javis@javis-HP-280-Pro-G2-MT:~/soft/ncnn/build/examples$ ./retinaface /tmp/p.jpeg
parse magic failed
network graph not ready
find_blob_index_by_name data failed
find_blob_index_by_name face_rpn_cls_prob_reshape_stride32 failed
find_blob_index_by_name face_rpn_bbox_pred_stride32 failed
find_blob_index_by_name face_rpn_landmark_pred_stride32 failed
Floating point exception (core dumped)
As shown in the figure below, select Native C++ when creating the project, and choose C++11 for the C++ Standard.
This test only uses the CPU, so Vulkan is not needed. Download the generic package ncnn-android-lib.zip and extract it into the cpp directory; in fact, only armeabi-v7a needs to be kept, since it is the ABI supported by most Android phones (check which ABIs other device types support as needed).
Create an assets directory under main and put mnet.25-opt.bin and mnet.25-opt.param into it.
The coding conventions can follow the native-lib.cpp created by default with the project; this time we need to provide two JNI interfaces.
Create a new Java class under the java directory and write the interface as follows:
package com.example.retainfacencnn;
import android.content.res.AssetManager;
import android.graphics.Bitmap;
public class RetinaFace {
    static {
        System.loadLibrary("retinafacencnn");
    }

    // Model loading interface; the AssetManager is used to load the weight files from assets
    public native boolean Init(AssetManager mgr);

    // Model detection interface; each face yields 4 box values + 5 landmark points = 14 floats
    public native float[] Detect(Bitmap bitmap);
}
Generate the corresponding JNI file retinaface_jni.cpp.
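Both interfaces below reference a file-scope ncnn::Net object and a few headers that are not shown in the snippets. The top of retinaface_jni.cpp would need roughly the following (a sketch inferred from the calls used below, not the exact original file; retinaface.hpp is the header created later in this post):

#include <jni.h>
#include <android/asset_manager_jni.h>   // AAssetManager_fromJava
#include <android/log.h>                 // __android_log_print

#include "net.h"                         // ncnn::Net, ncnn::Mat
#include "retinaface.hpp"                // FaceObject, detect_retinaface

// file-scope network object shared by the Init and Detect functions below
static ncnn::Net retinaface;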
Model loading interface
extern "C" JNIEXPORT jboolean JNICALL Java_com_example_retainfacencnn_RetinaFace_Init(JNIEnv *env, jobject thiz, jobject assetManager) { AAssetManager *mgr = AAssetManager_fromJava(env, assetManager); //init param int ret = retinaface.load_param(mgr, "mnet.25-opt.param"); if (ret != 0) { __android_log_print(ANDROID_LOG_DEBUG, "RetinaFace", "load_param failed"); return JNI_FALSE; } //init bin ret = retinaface.load_model(mgr, "mnet.25-opt.bin"); if (ret != 0) { __android_log_print(ANDROID_LOG_DEBUG, "RetinaFace", "load_model failed"); return JNI_FALSE; } return JNI_TRUE; }
Model detection interface
extern "C" JNIEXPORT jfloatArray JNICALL Java_com_example_retainfacencnn_RetinaFace_Detect(JNIEnv *env, jobject thiz, jobject bitmap) { ncnn::Extractor ex = retinaface.create_extractor(); ncnn::Mat in = ncnn::Mat::from_android_bitmap(env, bitmap, ncnn::Mat::PIXEL_BGR); std::vector<FaceObject> objs = detect_retinaface(retinaface, in); int count = static_cast<int>(objs.size()), ix = 0; if (count <= 0) return nullptr; //result to 1D-array count = static_cast<int>(count * 14); float *face_info = new float[count]; for (auto obj : objs) { face_info[ix++] = obj.rect.x; face_info[ix++] = obj.rect.y; face_info[ix++] = obj.rect.x + obj.rect.width; face_info[ix++] = obj.rect.y + obj.rect.height; for (int j = 0; j < 5; j++) { face_info[ix++] = obj.landmark[j].x; face_info[ix++] = obj.landmark[j].y; } } jfloatArray tFaceInfo = env->NewFloatArray(count); env->SetFloatArrayRegion(tFaceInfo, 0, count, face_info); delete[] face_info; return tFaceInfo; }
The code reuses the official retinaface example, but that example relies on OpenCV functions, and compiling OpenCV into the shared library as well would make it rather bloated. The OpenCV types show up, for example, in the FaceObject struct:
struct FaceObject
{
    cv::Rect_<float> rect;
    cv::Point2f landmark[5];
    float prob;
};
Create a new header file retinaface.hpp with the following simple replacements:
#include "net.h" /**************************************opencv code replaced************************************/ template<typename T> class Rect { public: T x, y, width, height; T area() const { return width * height; } static T intersection_area(const Rect<T> &a, const Rect<T> &b){ T x0 = std::max(a.x, b.x), y0 = std::max(a.y, b.y); T x1 = std::min(a.x + a.width, b.x + b.width), y1 = std::min(a.y + a.height, b.y + b.height); return (x1 - x0) * (y1 - y0); } }; struct Point2f { float x, y; }; /**************************************opencv code replaced************************************/ struct FaceObject { Rect<float> rect; Point2f landmark[5]; float prob; }; extern std::vector<FaceObject> detect_retinaface(const ncnn::Net & retinaface,ncnn::Mat in);
The contents of retinaface.cpp are basically copied from the official code, with only the following two modifications:
1> The function that computes the intersection area of two boxes
static inline float intersection_area(const FaceObject &a, const FaceObject &b) {
return Rect<float>::intersection_area(a.rect, b.rect);
}
2> The detection function
Take the model and the input data as parameters, and return the coordinate information of the detected targets.
std::vector<FaceObject> detect_retinaface(const ncnn::Net &retinaface, ncnn::Mat in) { /* ... */ }
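The body is essentially the official example with the cv::Mat input replaced by the ncnn::Mat parameter. A rough skeleton, using the output blob names that also appear in the error log above (the threshold values and the proposal/NMS helpers are assumptions in the spirit of the official retinaface.cpp and are only sketched in comments):

std::vector<FaceObject> detect_retinaface(const ncnn::Net &retinaface, ncnn::Mat in) {
    const float prob_threshold = 0.8f;  // assumed, in the spirit of the official example
    const float nms_threshold = 0.4f;   // assumed

    std::vector<FaceObject> faceproposals;
    ncnn::Extractor ex = retinaface.create_extractor();
    ex.input("data", in);

    // one block per detection stride (32 / 16 / 8); shown for stride 32 only
    {
        ncnn::Mat score_blob, bbox_blob, landmark_blob;
        ex.extract("face_rpn_cls_prob_reshape_stride32", score_blob);
        ex.extract("face_rpn_bbox_pred_stride32", bbox_blob);
        ex.extract("face_rpn_landmark_pred_stride32", landmark_blob);
        // decode anchors + offsets into FaceObject proposals above prob_threshold
        // (generate_proposals in the official example) and append them to faceproposals
    }
    // ... repeat for stride 16 and stride 8 ...

    // sort proposals by prob, run nms_sorted_bboxes with nms_threshold,
    // clip the kept boxes to the image, and collect them into the result
    std::vector<FaceObject> faceobjects;
    // ...
    return faceobjects;
}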
In the app/build.gradle file, add an ndk block under android -> defaultConfig to declare the target ABI as "armeabi-v7a":
ndk {
    moduleName "ncnn"
    abiFilters "armeabi-v7a"
}
Modify the app\src\main\cpp\CMakeLists.txt file; a minimal version is as follows:
cmake_minimum_required(VERSION 3.4.1)
project(retinafacencnn)

# import ncnn library
add_library(ncnn STATIC IMPORTED)
# change this folder path to yours
set_target_properties(ncnn PROPERTIES IMPORTED_LOCATION
        ${CMAKE_SOURCE_DIR}/ncnn-android-lib/${ANDROID_ABI}/libncnn.a)
include_directories(${CMAKE_SOURCE_DIR}/ncnn-android-lib/include/ncnn)

# openmp
set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -fopenmp -static-openmp")
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -fopenmp -static-openmp")

aux_source_directory(. DIR_SRCS)
add_library(retinafacencnn SHARED ${DIR_SRCS})
target_link_libraries(retinafacencnn ncnn android z log jnigraphics)
Build -> Rebuild Project; if there are no errors, the corresponding .so files will be generated under the debug folder.
Since I am not very familiar with Android, the UI event handling and drawing also follow the official example; if you are interested, you can check the code on GitHub directly. Below is the Java drawing function:
private void showObjects(float[] objects) {
    // draw objects on bitmap
    Bitmap rgba = bitmap.copy(Bitmap.Config.ARGB_8888, true);
    Canvas canvas = new Canvas(rgba);
    Paint paint = new Paint();
    paint.setColor(Color.RED);
    paint.setStyle(Paint.Style.STROKE);
    paint.setStrokeWidth(4);
    for (int i = 0; i < objects.length / 14; i++) {
        canvas.drawRect(objects[i * 14], objects[i * 14 + 1], objects[i * 14 + 2], objects[i * 14 + 3], paint);
    }
    canvas.drawPoints(objects, paint);
    imageView.setImageBitmap(rgba);
}
Judging from the results, the model performs quite well (the face-changing guy doesn't seem to have been detected :-D).
github:https://github.com/JavisPeng/ncnn-andriod-retainface