赞
踩
下面是使用 opencv-camera,实时处理区域内人脸检测 android 推理 demo。
首先是整合 opencv-camera 进去:
为了方便直接将整个 opencv-android-sdk 全部导入:
然后在原来的项目模块app中添加 opencv的 java 相关依赖,主要添加红色两行:
app/build.gradle
- dependencies {
- implementation fileTree(dir: 'libs', include: ['*.jar'])
- implementation 'androidx.appcompat:appcompat:1.4.1'
- implementation 'com.google.android.material:material:1.5.0'
- implementation 'androidx.constraintlayout:constraintlayout:2.1.3'
- testImplementation 'junit:junit:4.13.2'
- androidTestImplementation 'androidx.test.ext:junit:1.1.3'
- androidTestImplementation 'androidx.test.espresso:espresso-core:3.4.0'
- implementation project(':opencvsdk')
- }
最后在项目中要使用opencv的地方加载jni库,可以添加到 MainActivity 中:
System.loadLibrary("opencv_java4"); 或者 OpenCVLoader.initDebug();
要使用 opencv-camera,MainActivity 继承 CameraActivity,然后在回调函数中获取每一帧进行处理,比如下面对每一帧添加识别区域边框:
- // 获取每一帧回调数据
- private CameraBridgeViewBase.CvCameraViewListener2 cameraViewListener2 = new CameraBridgeViewBase.CvCameraViewListener2() {
- @Override
- public void onCameraViewStarted(int width, int height) {
- System.out.println("开始预览 width="+width+",height="+height);
- // 预览界面是 640*480,模型输入时 320*320,计算识别区域坐标
- int detection_x1 = (640 - OnnxUtil.w)/2;
- int detection_x2 = (640 - OnnxUtil.w)/2 + OnnxUtil.w;
- int detection_y1 = (480 - OnnxUtil.h)/2;
- int detection_y2 = (480 - OnnxUtil.h)/2 + OnnxUtil.h;;
- System.out.println("识别区域:"+"("+detection_x1+","+detection_y1+")"+"("+detection_x2+","+detection_y2+")");
- // 缓存识别区域两个点
- detection_p1 = new Point(detection_x1,detection_y1);
- detection_p2 = new Point(detection_x2,detection_y2);
- detection_box_color = new Scalar(255, 0, 0);
- detection_box_tickness = 2;
- }
- @Override
- public void onCameraViewStopped() {}
- @Override
- public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame frame) {
-
- // 获取 cv::Mat
- Mat mat = frame.rgba();
-
- // 标注识别区域
- Imgproc.rectangle(mat, detection_p1, detection_p2,detection_box_color,detection_box_tickness);
-
- return mat;
- }
- };
在界面中开启预览:
- ui资源:
- <org.opencv.android.JavaCamera2View
- android:id="@+id/camera_view"
- app:layout_constraintTop_toTopOf="parent"
- app:layout_constraintLeft_toLeftOf="parent"
- android:layout_width="match_parent"
- android:layout_height="match_parent">
-
- </org.opencv.android.JavaCamera2View>
- java开启预览:
- private BaseLoaderCallback baseLoaderCallback = new BaseLoaderCallback(this) {
- @Override
- public void onManagerConnected(int status) {
- switch (status) {
- case LoaderCallbackInterface.SUCCESS: {
- if (camera2View != null) {
- // 设置前置还是后置摄像头 0后置 1前置
- camera2View.setCameraIndex(cameraId);
- // 注册每一帧回调
- camera2View.setCvCameraViewListener(cameraViewListener2);
- // 显示/关闭 帧率 disableFpsMeter/enableFpsMeter
- // 要修改字体和颜色直接修改 FpsMeter 类即可
- camera2View.enableFpsMeter();
- // 设置视图宽高和模型一致减少resize操作,模型输入一般尺寸不大,这样相机渲染fps会更高
- camera2View.setMaxFrameSize(win_w,win_h);
- // 开启
- camera2View.enableView();
- }
- }
- break;
- default:
- super.onManagerConnected(status);
- break;
- }
- }
- };
下面是全部推理 MainActivity 代码:
- package com.example.camera_opencv;
-
-
- import android.content.pm.ActivityInfo;
- import android.os.Bundle;
- import android.view.WindowManager;
- import com.example.camera_opencv.databinding.ActivityMainBinding;
- import org.opencv.android.*;
- import org.opencv.core.Mat;
- import org.opencv.core.Point;
- import org.opencv.core.Scalar;
- import org.opencv.imgproc.Imgproc;
-
- import java.util.Arrays;
- import java.util.List;
-
- public class MainActivity extends CameraActivity{
-
- // 动态库
- static {
- // 我们自己的jni
- System.loadLibrary("camera_opencv");
- // 新加的 opencv 的jni
- System.loadLibrary("opencv_java4");
- }
-
- private ActivityMainBinding binding;
-
- // 预览界面
- private JavaCamera2View camera2View;
-
- // 相机编号 0后置 1前置
- private int cameraId = 1;
-
- // 设置预览界面宽高,在次宽高基础上限制识别区域
- private int win_w = 640;
- private int win_h = 480;
-
- // 识别区域两个点
- private Point detection_p1;
- private Point detection_p2;
- private Scalar detection_box_color;
- private int detection_box_tickness;
-
- @Override
- protected void onCreate(Bundle savedInstanceState) {
-
- super.onCreate(savedInstanceState);
- binding = ActivityMainBinding.inflate(getLayoutInflater());
- setContentView(binding.getRoot());
-
- // 加载模型
- OnnxUtil.loadModule(getAssets());
-
- // 强制横屏
- setRequestedOrientation(ActivityInfo.SCREEN_ORIENTATION_LANDSCAPE);
- // 隐藏上方状态栏
- getWindow().setFlags(WindowManager.LayoutParams.FLAG_FULLSCREEN, WindowManager.LayoutParams.FLAG_FULLSCREEN);
- // 预览界面
- camera2View = findViewById(R.id.camera_view);
- }
-
- @Override
- protected List<? extends CameraBridgeViewBase> getCameraViewList() {
- return Arrays.asList(camera2View);
- }
-
-
- @Override
- public void onPause() {
- super.onPause();
- if (camera2View != null) {
- // 关闭预览
- camera2View.disableView();
- }
- }
-
- @Override
- public void onResume() {
- super.onResume();
- if (OpenCVLoader.initDebug()) {
- baseLoaderCallback.onManagerConnected(LoaderCallbackInterface.SUCCESS);
- } else {
- OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION, this, baseLoaderCallback);
- }
- }
-
- // 获取每一帧回调数据
- private CameraBridgeViewBase.CvCameraViewListener2 cameraViewListener2 = new CameraBridgeViewBase.CvCameraViewListener2() {
- @Override
- public void onCameraViewStarted(int width, int height) {
- System.out.println("开始预览 width="+width+",height="+height);
- // 预览界面是 640*480,模型输入时 320*320,计算识别区域坐标
- int detection_x1 = (640 - OnnxUtil.w)/2;
- int detection_x2 = (640 - OnnxUtil.w)/2 + OnnxUtil.w;
- int detection_y1 = (480 - OnnxUtil.h)/2;
- int detection_y2 = (480 - OnnxUtil.h)/2 + OnnxUtil.h;;
- System.out.println("识别区域:"+"("+detection_x1+","+detection_y1+")"+"("+detection_x2+","+detection_y2+")");
- // 缓存识别区域两个点
- detection_p1 = new Point(detection_x1,detection_y1);
- detection_p2 = new Point(detection_x2,detection_y2);
- detection_box_color = new Scalar(255, 0, 0);
- detection_box_tickness = 2;
- }
- @Override
- public void onCameraViewStopped() {}
- @Override
- public Mat onCameraFrame(CameraBridgeViewBase.CvCameraViewFrame frame) {
-
- // 获取 cv::Mat
- Mat mat = frame.rgba();
-
- // 标注识别区域
- Imgproc.rectangle(mat, detection_p1, detection_p2,detection_box_color,detection_box_tickness);
-
- // 推理并标注
- OnnxUtil.inference(mat,detection_p1,detection_p2);
-
- return mat;
- }
- };
-
- // 开启预览
- private BaseLoaderCallback baseLoaderCallback = new BaseLoaderCallback(this) {
- @Override
- public void onManagerConnected(int status) {
- switch (status) {
- case LoaderCallbackInterface.SUCCESS: {
- if (camera2View != null) {
- // 设置前置还是后置摄像头 0后置 1前置
- camera2View.setCameraIndex(cameraId);
- // 注册每一帧回调
- camera2View.setCvCameraViewListener(cameraViewListener2);
- // 显示/关闭 帧率 disableFpsMeter/enableFpsMeter
- // 要修改字体和颜色直接修改 FpsMeter 类即可
- camera2View.enableFpsMeter();
- // 设置视图宽高和模型一致减少resize操作,模型输入一般尺寸不大,这样相机渲染fps会更高
- camera2View.setMaxFrameSize(win_w,win_h);
- // 开启
- camera2View.enableView();
- }
- }
- break;
- default:
- super.onManagerConnected(status);
- break;
- }
- }
- };
-
- }
onnx 模型加载和推理代码:
使用的微软onnx推理框架:
implementation 'com.microsoft.onnxruntime:onnxruntime-android:latest.release'
implementation 'com.microsoft.onnxruntime:onnxruntime-extensions-android:latest.release'
- package com.example.camera_opencv;
-
- import ai.onnxruntime.*;
- import android.content.res.AssetManager;
- import org.opencv.core.*;
- import org.opencv.dnn.Dnn;
- import org.opencv.imgproc.Imgproc;
-
- import java.io.ByteArrayOutputStream;
- import java.io.InputStream;
- import java.nio.FloatBuffer;
- import java.util.*;
-
- public class OnnxUtil {
-
- // onnxruntime 环境
- public static OrtEnvironment env;
- public static OrtSession session;
-
- // 模型输入
- public static int w = 0;
- public static int h = 0;
- public static int c = 3;
-
- // 标注颜色
- public static Scalar green = new Scalar(0, 255, 0);
- public static int tickness = 2;
-
- // 模型加载
- public static void loadModule(AssetManager assetManager){
-
- // 下面包含了多个模型
- // yolov5face-blazeface-640x640.onnx 3.4Mb
- // yolov5face-l-640x640.onnx 181Mb
- // yolov5face-m-640x640.onnx 83Mb
- // yolov5face-n-0.5-320x320.onnx 2.5Mb
- // yolov5face-n-0.5-640x640.onnx 4.6Mb
- // yolov5face-n-640x640.onnx 9.5Mb
- // yolov5face-s-640x640.onnx 30Mb
-
- w = 320;
- h = 320;
- c = 3;
-
- try {
- // 模型输入: input -> [1, 3, 320, 320] -> FLOAT
- // 模型输出: output -> [1, 6300, 16] -> FLOAT
- InputStream inputStream = assetManager.open("yolov5face-n-0.5-320x320.onnx");
- ByteArrayOutputStream buffer = new ByteArrayOutputStream();
- int nRead;
- byte[] data = new byte[1024];
- while ((nRead = inputStream.read(data, 0, data.length)) != -1) {
- buffer.write(data, 0, nRead);
- }
- buffer.flush();
- byte[] module = buffer.toByteArray();
- System.out.println("开始加载模型");
- env = OrtEnvironment.getEnvironment();
- session = env.createSession(module, new OrtSession.SessionOptions());
- session.getInputInfo().entrySet().stream().forEach(n -> {
- String inputName = n.getKey();
- NodeInfo inputInfo = n.getValue();
- long[] shape = ((TensorInfo) inputInfo.getInfo()).getShape();
- String javaType = ((TensorInfo) inputInfo.getInfo()).type.toString();
- System.out.println("模型输入: "+inputName + " -> " + Arrays.toString(shape) + " -> " + javaType);
- });
- session.getOutputInfo().entrySet().stream().forEach(n -> {
- String outputName = n.getKey();
- NodeInfo outputInfo = n.getValue();
- long[] shape = ((TensorInfo) outputInfo.getInfo()).getShape();
- String javaType = ((TensorInfo) outputInfo.getInfo()).type.toString();
- System.out.println("模型输出: "+outputName + " -> " + Arrays.toString(shape) + " -> " + javaType);
- });
- } catch (Exception e) {
- e.printStackTrace();
- }
-
- }
-
-
- // 模型推理,输入原始图片和识别区域两个点
- public static void inference(Mat mat,Point detection_p1,Point detection_p2){
-
- int px = Double.valueOf(detection_p1.x).intValue();
- int py = Double.valueOf(detection_p1.y).intValue();
-
- // 提取rgb(chw存储)并做归一化,也就是 rrrrr bbbbb ggggg
- float[] chw = new float[c*h*w];
- // 像素点索引
- int index = 0;
- for(int j=0 ; j<h ; j++){
- for(int i=0 ; i<w ; i++){
- // 第j行,第i列,根据识别区域p1得到xy坐标的偏移,直接加就行
- double[] rgb = mat.get(j+py,i+px);
- // 缓存到 chw 中,mat 是 rgba 数据对应的下标 2103
- chw[index] = (float)(rgb[2]/255);//r
- chw[index + w * h * 1 ] = (float)(rgb[1]/255);//G
- chw[index + w * h * 2 ] = (float)(rgb[0]/255);//b
- index ++;
- }
- }
-
- // 创建张量并进行推理
- try {
-
- OnnxTensor tensor = OnnxTensor.createTensor(env, FloatBuffer.wrap(chw), new long[]{1,c,h,w});
- OrtSession.Result output = session.run(Collections.singletonMap("input", tensor));
- float[][] out = ((float[][][])(output.get(0)).getValue())[0];
-
- ArrayList<float[]> datas = new ArrayList<>();
-
- for(int i=0;i<out.length;i++){
-
- float[] data = out[i];
-
- float score1 = data[4]; // 边框置信度
- float score2 = data[15];// 人脸置信度
- if( score1 >= 0.2 && score2>= 0.2){
- // xywh 转 x1y1x2y2
- float xx = data[0];
- float yy = data[1];
- float ww = data[2];
- float hh = data[3];
- float[] xyxy = xywh2xyxy(new float[]{xx,yy,ww,hh},w,h);
- data[0] = xyxy[0];
- data[1] = xyxy[1];
- data[2] = xyxy[2];
- data[3] = xyxy[3];
- datas.add(data);
- }
- }
-
- // nms
- ArrayList<float[]> datas_after_nms = new ArrayList<>();
- while (!datas.isEmpty()){
- float[] max = datas.get(0);
- datas_after_nms.add(max);
- Iterator<float[]> it = datas.iterator();
- while (it.hasNext()) {
- // nsm阈值
- float[] obj = it.next();
- double iou = calculateIoU(max,obj);
- if (iou > 0.5f) {
- it.remove();
- }
- }
- }
-
- // 标注
- datas_after_nms.stream().forEach(n->{
-
- // x y w h score 中心点坐标和分数
- // x y 关键点坐标
- // x y 关键点坐标
- // x y 关键点坐标
- // x y 关键点坐标
- // x y 关键点坐标
- // cls_conf 人脸置信度
-
- // 画边框和关键点需要添加偏移
- int x1 = Float.valueOf(n[0]).intValue() + px;
- int y1 = Float.valueOf(n[1]).intValue() + py;
- int x2 = Float.valueOf(n[2]).intValue() + px;
- int y2 = Float.valueOf(n[3]).intValue() + py;
- Imgproc.rectangle(mat, new Point(x1, y1), new Point(x2, y2), green, tickness);
-
- float point1_x = Float.valueOf(n[5]).intValue() + px;// 关键点1
- float point1_y = Float.valueOf(n[6]).intValue() + py;//
- float point2_x = Float.valueOf(n[7]).intValue() + px;// 关键点2
- float point2_y = Float.valueOf(n[8]).intValue() + py;//
- float point3_x = Float.valueOf(n[9]).intValue() + px;// 关键点3
- float point3_y = Float.valueOf(n[10]).intValue() + py;//
- float point4_x = Float.valueOf(n[11]).intValue() + px;// 关键点4
- float point4_y = Float.valueOf(n[12]).intValue() + py;//
- float point5_x = Float.valueOf(n[13]).intValue() + px;// 关键点5
- float point5_y = Float.valueOf(n[14]).intValue() + py;//
-
- Imgproc.circle(mat, new Point(point1_x, point1_y), 1, green, tickness);
- Imgproc.circle(mat, new Point(point2_x, point2_y), 1, green, tickness);
- Imgproc.circle(mat, new Point(point3_x, point3_y), 1, green, tickness);
- Imgproc.circle(mat, new Point(point4_x, point4_y), 1, green, tickness);
- Imgproc.circle(mat, new Point(point5_x, point5_y), 1, green, tickness);
-
- });
-
- }
- catch (Exception e){
- e.printStackTrace();
- }
- }
-
-
- // 中心点坐标转 xin xmax ymin ymax
- public static float[] xywh2xyxy(float[] bbox,float maxWidth,float maxHeight) {
- // 中心点坐标
- float x = bbox[0];
- float y = bbox[1];
- float w = bbox[2];
- float h = bbox[3];
- // 计算
- float x1 = x - w * 0.5f;
- float y1 = y - h * 0.5f;
- float x2 = x + w * 0.5f;
- float y2 = y + h * 0.5f;
- // 限制在图片区域内
- return new float[]{
- x1 < 0 ? 0 : x1,
- y1 < 0 ? 0 : y1,
- x2 > maxWidth ? maxWidth:x2,
- y2 > maxHeight? maxHeight:y2};
- }
-
- // 计算两个框的交并比
- private static double calculateIoU(float[] box1, float[] box2) {
- // getXYXY() 返回 xmin-0 ymin-1 xmax-2 ymax-3
- double x1 = Math.max(box1[0], box2[0]);
- double y1 = Math.max(box1[1], box2[1]);
- double x2 = Math.min(box1[2], box2[2]);
- double y2 = Math.min(box1[3], box2[3]);
- double intersectionArea = Math.max(0, x2 - x1 + 1) * Math.max(0, y2 - y1 + 1);
- double box1Area = (box1[2] - box1[0] + 1) * (box1[3] - box1[1] + 1);
- double box2Area = (box2[2] - box2[0] + 1) * (box2[3] - box2[1] + 1);
- double unionArea = box1Area + box2Area - intersectionArea;
- return intersectionArea / unionArea;
- }
-
- }
-
-
-
-
项目详细代码:
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。