赞
踩
一、错误提示
二、原因分析
当多个 yolov5(或其他 yolo)项目相互调用时,下级推理程序会直接加载上级(即根目录下)项目的模块文件。例如:下级推理文件会导入上级项目 utils 包中的 plots.py,而不是同级目录下的 utils 包。一旦上级项目目录中的程序文件被修改,就会出现上述调用错误。
三、解决方法
使用onnx的权重格式,代替.pt格式
1)使用export.py文件导出best.onnx文件
2)推理文件
- import argparse
- import os
- import platform
- import sys
- from pathlib import Path
-
- import torch
- from models.common import DetectMultiBackend
- from utils.dataloaders import IMG_FORMATS, VID_FORMATS, LoadImages, LoadScreenshots, LoadStreams
- from utils.general import (LOGGER, Profile, check_file, check_img_size, check_imshow, check_requirements, colorstr, cv2,
- increment_path, non_max_suppression, print_args, scale_boxes, strip_optimizer, xyxy2xywh)
- from utils.plots import Annotator, colors, save_one_box
- from utils.torch_utils import select_device, smart_inference_mode
- import onnxruntime
-
-
def my_letter_box(img, size=(640, 640)):
    """Letterbox `img` into a `size` canvas, preserving aspect ratio.

    The image is scaled by the largest ratio that fits inside `size`,
    centered, and padded with gray (114, 114, 114) on all sides.

    Args:
        img: HxWx3 image array (3 channels required by the unpack below).
        size: (height, width) of the output canvas.

    Returns:
        (padded_img, scale, left_pad, top_pad) — the pads and scale are
        needed later to map detections back to the original frame.
    """
    target_h, target_w = size
    h, w, _ = img.shape
    scale = min(target_h / h, target_w / w)
    new_h = int(h * scale)
    new_w = int(w * scale)

    # Center the resized image: split the slack between top/bottom and
    # left/right (the extra pixel, if any, goes to bottom/right).
    top = int((target_h - new_h) / 2)
    left = int((target_w - new_w) / 2)
    bottom = target_h - new_h - top
    right = target_w - new_w - left

    resized = cv2.resize(img, (new_w, new_h))
    padded = cv2.copyMakeBorder(
        resized, top, bottom, left, right,
        borderType=cv2.BORDER_CONSTANT, value=(114, 114, 114),
    )
    return padded, scale, left, top
-
def xywh2xyxy(boxes):
    """Convert boxes from center format (cx, cy, w, h) to corner format.

    Args:
        boxes: ndarray of shape (N, >=4); only the first four columns are
            converted — any extra columns are carried through unchanged.

    Returns:
        A new ndarray, same shape as `boxes`, with columns 0..3 rewritten
        as (x1, y1, x2, y2). The input array is not modified.
    """
    # BUG FIX: the original called copy.deepcopy(), but the `copy` module
    # is never imported in this file (NameError at runtime). ndarray.copy()
    # is the idiomatic and correct equivalent here.
    out = boxes.copy()
    half_w = boxes[:, 2] / 2
    half_h = boxes[:, 3] / 2
    out[:, 0] = boxes[:, 0] - half_w
    out[:, 1] = boxes[:, 1] - half_h
    out[:, 2] = boxes[:, 0] + half_w
    out[:, 3] = boxes[:, 1] + half_h
    return out
-
def my_nms(boxes, iou_thresh):
    """Greedy non-maximum suppression.

    Args:
        boxes: ndarray (N, >=5); columns 0..3 are (x1, y1, x2, y2) corners
            and column 4 is the confidence score.
        iou_thresh: boxes overlapping a kept box by more than this IoU are
            discarded.

    Returns:
        List of kept row indices, ordered by descending score.
    """
    order = np.argsort(boxes[:, 4])[::-1]  # best score first
    keep = []
    while order.size > 0:
        best = order[0]
        keep.append(best)
        rest = order[1:]

        # Intersection rectangle of `best` with every remaining box.
        ix1 = np.maximum(boxes[best, 0], boxes[rest, 0])
        iy1 = np.maximum(boxes[best, 1], boxes[rest, 1])
        ix2 = np.minimum(boxes[best, 2], boxes[rest, 2])
        iy2 = np.minimum(boxes[best, 3], boxes[rest, 3])
        inter = np.maximum(0, ix2 - ix1) * np.maximum(0, iy2 - iy1)

        area_best = (boxes[best, 2] - boxes[best, 0]) * (boxes[best, 3] - boxes[best, 1])
        area_rest = (boxes[rest, 2] - boxes[rest, 0]) * (boxes[rest, 3] - boxes[rest, 1])

        # IoU = intersection / union, with union = A + B - intersection.
        iou = inter / (area_best + area_rest - inter)

        # Keep only the boxes that do not overlap `best` too much; +1 is
        # unnecessary here because we index into `rest`, not `order`.
        order = rest[np.where(iou <= iou_thresh)[0]]
    return keep
-
def restore_box(boxes, r, left, top):
    """Map letterboxed coordinates back to the original image frame, in place.

    Columns [0,2] are box x corners and [5,7,9,11] are landmark x values;
    columns [1,3] are box y corners and [6,8,10,12] are landmark y values
    (presumably four keypoints — confirm against the model output layout).
    The padding offsets are removed first, then the letterbox scale `r` is
    undone.

    Returns the same (mutated) array for call-chaining convenience.
    """
    x_cols = [0, 2, 5, 7, 9, 11]
    y_cols = [1, 3, 6, 8, 10, 12]
    boxes[:, x_cols] -= left
    boxes[:, y_cols] -= top
    boxes[:, x_cols] /= r
    boxes[:, y_cols] /= r
    return boxes
-
-
def post_precessing(dets, r, left, top, conf_thresh=0.3, iou_thresh=0.5):
    """Filter raw ONNX detections, run NMS, and restore original coordinates.

    Args:
        dets: raw network output, indexed as (batch, N, 15) — columns 0..3
            box xywh, column 4 objectness, columns 5..12 landmark coords,
            columns 13..14 class scores (NOTE(review): looks like a
            2-class detector with 4 landmarks — confirm against the model).
        r, left, top: letterbox scale and padding from `my_letter_box`.
        conf_thresh: minimum objectness to keep a candidate.
        iou_thresh: NMS overlap threshold.

    Returns:
        ndarray (M, 14): x1, y1, x2, y2, score, 8 landmark coords, class id.
    """
    confident = dets[:, :, 4] > conf_thresh
    dets = dets[confident]

    # Final class score = class probability * objectness.
    dets[:, 13:15] *= dets[:, 4:5]

    corners = xywh2xyxy(dets[:, :4])
    best_score = np.max(dets[:, 13:15], axis=-1, keepdims=True)
    best_cls = np.argmax(dets[:, 13:15], axis=-1).reshape(-1, 1)

    merged = np.concatenate((corners, best_score, dets[:, 5:13], best_cls), axis=1)
    kept = my_nms(merged, iou_thresh)
    return restore_box(merged[kept], r, left, top)
-
-
-
weight = './best.onnx'
image_path = './test.jpg'

providers = ['CPUExecutionProvider']
# BUG FIX: the original passed the undefined name `wheelweight` here,
# raising NameError before inference could start.
session = onnxruntime.InferenceSession(weight, providers=providers)

# BUG FIX: the original called my_letter_box(img) with `img` never
# assigned — the image has to be loaded first.
src = cv2.imread(image_path)
img, r, left, top = my_letter_box(src)

# BGR -> RGB, HWC -> CHW, add a batch dimension, cast to float32.
# NOTE(review): YOLOv5 exports typically expect pixels scaled to [0, 1]
# (img /= 255.0) — confirm against the export settings of best.onnx.
img = img[:, :, ::-1].transpose(2, 0, 1).copy().astype(np.float32)
img = img.reshape(1, *img.shape)

y_onnx = session.run([session.get_outputs()[0].name],
                     {session.get_inputs()[0].name: img})[0]
outputs = post_precessing(y_onnx, r, left, top)

# BUG FIX: the original guarded with a bare `continue` outside any loop
# (a SyntaxError); the for-loop below already handles an empty result.
for output in outputs:
    output = output.tolist()
    rect = output[:4]
    # BUG FIX: draw on the source image — restore_box already mapped the
    # boxes back to the original (un-letterboxed) coordinate frame, so
    # drawing on the transposed float tensor was both invalid for
    # cv2.rectangle and in the wrong coordinate space.
    cv2.rectangle(src,
                  (int(rect[0]), int(rect[1])),
                  (int(rect[2]), int(rect[3])),
                  (0, 255, 255), thickness=3)
-
-
-
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。