
Deploying YOLOv5 with Paddle Lite on ARM Linux (Python demo)

1. Build environment

Build Paddle Lite from source on the ARM Linux system; see the official build guide (link) for details.

# Clone the Paddle Lite source and switch to a release branch, e.g. release/v2.10
git clone https://github.com/PaddlePaddle/Paddle-Lite.git
cd Paddle-Lite && git checkout release/v2.10

# (Optional) remove the third-party directory; the build script will
# re-download the third-party libraries from a CDN automatically
# rm -rf third-party

# Run the build script
./lite/tools/build_linux.sh

After the build succeeds, a .whl wheel is generated under Paddle-Lite/build.lite.linux.armv8.gcc/inference_lite_lib.armlinux.armv8/python/install/dist; install it directly with pip. (If no wheel appears there, the Python bindings were probably not enabled; check the options accepted by ./lite/tools/build_linux.sh, e.g. a with_python switch in the docs for your release.)
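
As a quick sanity check after pip install, confirm that the Python API imports on the target board. This is a minimal sketch; it assumes nothing beyond the wheel built above:

# Verify the Paddle Lite Python package is importable on the target device
from paddlelite.lite import MobileConfig, create_paddle_predictor
print("Paddle Lite Python API is available")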

2. Model conversion

Install x2paddle:

pip install x2paddle

Taking an ONNX model as an example; see the x2paddle documentation (link) for more options.

from x2paddle.convert import onnx2paddle

model_path = "/home/server/桌面/paddle_demo/arm/yolov5s_sim.onnx"
save_dir = "./"

onnx2paddle(model_path, save_dir,
            convert_to_lite=True,
            lite_valid_places="arm",
            lite_model_type="naive_buffer")

# model_path (str): path to the ONNX model
# save_dir (str): directory where the converted model is saved
# convert_to_lite (bool): whether to also run the opt tool; defaults to False

# lite_valid_places (str): target backend for the conversion; defaults to arm.
# Currently supported values: arm, opencl, x86, metal, xpu, bm, mlu,
# intel_fpga, huawei_ascend_npu, imagination_nna,
# rockchip_npu, mediatek_apu, huawei_kirin_npu, amlogic_npu. Multiple backends
# can be given at once (comma-separated, higher priority first), and opt will
# automatically pick the best one.

# lite_model_type (str): serialization format of the converted model; currently
# protobuf and naive_buffer are supported, default naive_buffer

The generated .nb file is the Paddle Lite inference model.
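
Before writing the full demo, it is worth loading the .nb file once with a dummy input to confirm the conversion worked, as in the minimal sketch below (the opt.nb filename and the 1x3x640x640 input shape are assumptions based on a standard YOLOv5s export):

import numpy as np
from paddlelite.lite import MobileConfig, create_paddle_predictor

config = MobileConfig()
config.set_model_from_file("./opt.nb")  # assumed output name from the conversion step
predictor = create_paddle_predictor(config)

dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)  # standard YOLOv5s input
predictor.get_input(0).from_numpy(dummy)
predictor.run()
print(predictor.get_output(0).numpy().shape)  # e.g. (1, 25200, 85) for an 80-class model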

3. YOLOv5 inference with Paddle Lite

from paddlelite.lite import *
import numpy as np
import cv2
import time

CLASSES = []  # fill in your model's class names (e.g. the 80 COCO labels)




def pynms(dets, thresh):  # non-maximum suppression
    x1 = dets[:, 0]
    y1 = dets[:, 1]
    x2 = dets[:, 2]
    y2 = dets[:, 3]
    areas = (y2 - y1 + 1) * (x2 - x1 + 1)
    scores = dets[:, 4]
    keep = []
    index = scores.argsort()[::-1]  # indices sorted by confidence, highest first

    while index.size > 0:
        i = index[0]
        keep.append(i)

        x11 = np.maximum(x1[i], x1[index[1:]])  # intersection rectangle
        y11 = np.maximum(y1[i], y1[index[1:]])
        x22 = np.minimum(x2[i], x2[index[1:]])
        y22 = np.minimum(y2[i], y2[index[1:]])

        w = np.maximum(0, x22 - x11 + 1)  # if two boxes do not intersect,
        h = np.maximum(0, y22 - y11 + 1)  # x22 - x11 or y22 - y11 is negative,
                                          # so clamp the overlap to zero

        overlaps = w * h
        ious = overlaps / (areas[i] + areas[index[1:]] - overlaps)  # IoU

        idx = np.where(ious <= thresh)[0]  # keep boxes with IoU below thresh
        index = index[idx + 1]  # +1 because ious was computed against index[1:]

    return keep


def xywh2xyxy(x):
    # [x, y, w, h] to [x1, y1, x2, y2]
    y = np.copy(x)
    y[:, 0] = x[:, 0] - x[:, 2] / 2
    y[:, 1] = x[:, 1] - x[:, 3] / 2
    y[:, 2] = x[:, 0] + x[:, 2] / 2
    y[:, 3] = x[:, 1] + x[:, 3] / 2
    return y

def filter_box(org_box, conf_thres, iou_thres):  # drop low-quality boxes
    org_box = np.squeeze(org_box)  # remove the batch dimension
    conf = org_box[..., 4] > conf_thres  # mask of boxes above the confidence threshold
    # print(conf)
    box = org_box[conf]
    cls_cinf = box[..., 5:]
    cls = []
    for i in range(len(cls_cinf)):
        cls.append(int(np.argmax(cls_cinf[i])))
    all_cls = list(set(cls))  # deduplicated list of predicted classes
    output = []
    for i in range(len(all_cls)):
        curr_cls = all_cls[i]
        curr_cls_box = []
        curr_out_box = []
        for j in range(len(cls)):
            if cls[j] == curr_cls:
                box[j][5] = curr_cls  # overwrite column 6 with the class index
                curr_cls_box.append(box[j][:6])  # boxes of the current class
        curr_cls_box = np.array(curr_cls_box)
        curr_cls_box = xywh2xyxy(curr_cls_box)
        curr_out_box = pynms(curr_cls_box, iou_thres)  # indices kept after NMS
        for k in curr_out_box:
            output.append(curr_cls_box[k])  # gather the surviving boxes
    output = np.array(output)
    return output
# Letterbox: scale and pad the image to the model input size
def letterbox_r(im, new_shape=(640, 640), color=(0, 0, 0)):
    # Resize and pad image while meeting stride-multiple constraints
    shape = im.shape[:2]  # current shape [height, width]
    if isinstance(new_shape, int):
        new_shape = (new_shape, new_shape)

    # Scale ratio (new / old)
    r = min(new_shape[0] / shape[0], new_shape[1] / shape[1])

    # Compute padding
    ratio = r, r  # width, height ratios
    new_unpad = int(round(shape[1] * r)), int(round(shape[0] * r))
    dw, dh = new_shape[1] - new_unpad[0], new_shape[0] - new_unpad[1]  # wh padding

    dw /= 2  # divide padding into 2 sides
    dh /= 2

    if shape[::-1] != new_unpad:  # resize
        im = cv2.resize(im, new_unpad, interpolation=cv2.INTER_LINEAR)
    top, bottom = int(round(dh - 0.1)), int(round(dh + 0.1))
    left, right = int(round(dw - 0.1)), int(round(dw + 0.1))
    im = cv2.copyMakeBorder(im, top, bottom, left, right, cv2.BORDER_CONSTANT, value=color)  # add border
    return im, ratio, (dw, dh)
    
# Map predicted box coordinates back to the original image scale
def scale_coords(img1_shape, coords, img0_shape, ratio_pad=None):
    #  img1_shape: shape of the letterboxed (resized) image
    #  coords: predicted box coordinates (xyxy)
    #  img0_shape: shape of the original image
    #  ratio_pad: scale ratio and padding applied during letterboxing

    # Rescale coords (xyxy) from img1_shape to img0_shape
    if ratio_pad is None:  # calculate from img0_shape
        gain = max(img1_shape) / max(img0_shape)  # gain  = old / new
        pad = (img1_shape[1] - img0_shape[1] * gain) / 2, (img1_shape[0] - img0_shape[0] * gain) / 2  # wh padding
    else:
        gain = ratio_pad[0][0]
        pad = ratio_pad[1]

    coords[:, [0, 2]] -= pad[0]  # x padding
    coords[:, [1, 3]] -= pad[1]  # y padding
    coords[:, :4] /= gain
    clip_coords(coords, img0_shape)
    return coords

def clip_coords(boxes, img_shape):
    # Clip xyxy bounding boxes to the image shape (height, width)
    boxes[:, [0, 2]] = boxes[:, [0, 2]].clip(0, img_shape[1])  # x1, x2
    boxes[:, [1, 3]] = boxes[:, [1, 3]].clip(0, img_shape[0])  # y1, y2


def draw(image, box_data):  # draw the detections on the image
    boxes = box_data[..., :4].astype(np.int32)  # integer coordinates for drawing
    scores = box_data[..., 4]
    classes = box_data[..., 5].astype(np.int32)  # class indices

    for box, score, cl in zip(boxes, scores, classes):
        x1, y1, x2, y2 = box
        print('class: {}, score: {}'.format(CLASSES[cl], score))
        print('box coordinate x1,y1,x2,y2: [{}, {}, {}, {}]'.format(x1, y1, x2, y2))

        cv2.rectangle(image, (x1, y1), (x2, y2), (255, 0, 0), 2)
        cv2.putText(image, '{0} {1:.2f}'.format(CLASSES[cl], score),
                    (x1, y1),
                    cv2.FONT_HERSHEY_SIMPLEX,
                    0.6, (0, 0, 255), 2)



model_dir = 'opt.nb'
img_path = './1.jpg'
or_img = cv2.imread(img_path)
se_img = letterbox_r(or_img, (640, 640))[0]
img = se_img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
img = img.astype(dtype=np.float32)
img /= 255.0
img = np.expand_dims(img, axis=0)
# print(img.shape)

# 1. Set config information
config = MobileConfig()
# 2. Set the path to the model generated by the opt tool
config.set_model_from_file(model_dir)
# 3. Create predictor from config
predictor = create_paddle_predictor(config)
# print(predictor)

input_tensor = predictor.get_input(0)
input_tensor.from_numpy(img.astype("float32"))
s = time.time()
predictor.run()
output_tensor = predictor.get_output(0)
e = time.time()
print("infer_time:{}".format(e - s))
output_data = output_tensor.numpy()
# print(output_data.shape)
outbox = filter_box(output_data, 0.5, 0.5)
outbox[..., :4] = scale_coords(se_img.shape, outbox[..., :4], or_img.shape)
draw(or_img, outbox)
cv2.imwrite('./det2.jpg', or_img)
# cv2.imshow('res', or_img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
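
For intuition about the NMS step, here is a quick standalone check of pynms with made-up boxes (illustrative values only; run it with the functions defined above):

# Three [x1, y1, x2, y2, score] boxes; the second heavily overlaps the first
dets = np.array([[ 10,  10,  60,  60, 0.9],
                 [ 12,  12,  62,  62, 0.8],
                 [100, 100, 150, 150, 0.7]])
print([int(k) for k in pynms(dets, 0.5)])  # -> [0, 2]: the lower-scored overlapping box is suppressed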