Repository: https://github.com/ppogg/YOLOv5-Lite
Install labelme and launch it to annotate the images:
pip install labelme
labelme
Then convert the labelme JSON annotations to YOLO format with https://github.com/rooneysh/Labelme2YOLO:
python labelme2yolo.py --json_dir C:\Users\Administrator\Desktop\cars\images
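After the conversion, Labelme2YOLO writes a YOLODataset folder with images/ and labels/ split into train/val, plus a dataset.yaml. A rough sketch of that dataset.yaml, assuming a single class named car (the class name and paths are illustrative, adjust them to your own data):

train: /data/xiedong/YOLODataset/images/train
val: /data/xiedong/YOLODataset/images/val
nc: 1
names: ['car']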
The environment uses Python 3.8; requirements.txt:
# pip install -r requirements.txt
# base ----------------------------------------
matplotlib==3.2.2
numpy==1.20.3
opencv-python>=4.1.2
Pillow
PyYAML>=5.3.1
scipy>=1.4.1
torch==1.8.0
torchvision==0.9.0
tqdm>=4.41.0
# logging -------------------------------------
tensorboard==2.4.1
# wandb
# plotting ------------------------------------
seaborn>=0.11.0
pandas
# export --------------------------------------
# coremltools==4.1
# onnx==1.9.1
# scikit-learn==0.19.2 # for coreml quantization
# extras --------------------------------------
thop # FLOPS computation
pycocotools>=2.0 # COCO mAP
I only have a single class, so I train like this:
python train.py --weights v5lite-e.pt --data /data/xiedong/YOLODataset/dataset.yaml --cfg ./models/v5Lite-e.yaml --epochs 100 --batch-size 8 --img-size 320 --single-cls --device cpu
# Small objects could not be learned with the 320 input above, so retrain v5Lite-s at 640:
python train.py --weights v5lite-s.pt --data /data/xiedong/YOLODataset/dataset.yaml --cfg ./models/v5Lite-s.yaml --epochs 100 --batch-size 8 --img-size 640 --single-cls --device cpu
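Optionally, training progress can be watched with TensorBoard (it is already listed in requirements.txt); the runs are written under runs/train:

tensorboard --logdir runs/train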
# Trained weights end up under runs/train/exp12/weights/ (best.pt and last.pt); run detection with them:
python detect.py --weights /data/xiedong/YOLOv5-Lite-master/runs/train/exp12/weights/best.pt --source /data/xiedong/YOLODataset/images/train/001.png
python detect.py --weights ./yolov5lite_s_best.pt --source ./img_2.png
python export.py --weights /data/xiedong/YOLOv5-Lite-master/runs/train/exp12/weights/best.pt --img-size 640 --end2end --concat
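export.py should drop an ONNX file next to the .pt weights; the exact filename depends on the export script, so best.onnx below is an assumption. A minimal sketch for sanity-checking the exported model with onnxruntime:

# quick shape check of the exported model; "best.onnx" is an assumed filename
import numpy as np
import onnxruntime as ort

sess = ort.InferenceSession("best.onnx", providers=["CPUExecutionProvider"])
inp = sess.get_inputs()[0]
print(inp.name, inp.shape)  # e.g. images, [1, 3, 640, 640]

dummy = np.zeros((1, 3, 640, 640), dtype=np.float32)  # dummy input at the export size
for meta, out in zip(sess.get_outputs(), sess.run(None, {inp.name: dummy})):
    print(meta.name, out.shape)  # output layout depends on --end2end / --concat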
import argparse
import time
from pathlib import Path

import cv2
import numpy as np
import torch
import torch.backends.cudnn as cudnn
from numpy import random

from models.experimental import attempt_load
from utils.datasets import LoadStreams, LoadImages, letterbox
from utils.general import check_img_size, check_requirements, check_imshow, non_max_suppression, apply_classifier, \
    scale_coords, xyxy2xywh, strip_optimizer, set_logging, increment_path
from utils.plots import plot_one_box
from utils.torch_utils import select_device, load_classifier, time_synchronized


def detect(save_img=False):
    weights, imgsz = opt.weights, opt.img_size
    device = select_device(opt.device)
    half = device.type != 'cpu'  # half precision only supported on CUDA

    # Load model
    model = attempt_load(weights, map_location=device)  # load FP32 model
    stride = int(model.stride.max())  # model stride
    imgsz = check_img_size(imgsz, s=stride)  # check img_size
    if half:
        model.half()  # to FP16

    # Read the test image and letterbox it to the network input size
    img0 = cv2.imread("img_2.png")
    # Padded resize
    img = letterbox(img0, imgsz, stride=stride)[0]
    img = img[:, :, ::-1].transpose(2, 0, 1)  # BGR to RGB, HWC to CHW
    img = np.ascontiguousarray(img)

    # Get names and colors
    names = model.module.names if hasattr(model, 'module') else model.names
    colors = [[random.randint(0, 255) for _ in range(3)] for _ in names]

    t0 = time.time()
    img = torch.from_numpy(img).to(device)
    img = img.half() if half else img.float()  # uint8 to fp16/32
    img /= 255.0  # 0 - 255 to 0.0 - 1.0
    if img.ndimension() == 3:
        img = img.unsqueeze(0)  # add batch dimension

    # Inference
    t1 = time_synchronized()
    pred = model(img)[0]

    # Apply NMS
    pred = non_max_suppression(pred, opt.conf_thres, opt.iou_thres)
    t2 = time_synchronized()

    # Process detections
    for i, det in enumerate(pred):  # detections per image
        if len(det):
            # Rescale boxes from the letterboxed size back to the original image size
            det[:, :4] = scale_coords(img.shape[2:], det[:, :4], img0.shape).round()
            for *xyxy, conf, cls in reversed(det):
                label = f'{names[int(cls)]} {conf:.2f}'
                plot_one_box(xyxy, img0, label=label, color=colors[int(cls)], line_thickness=3)

    print(f'Done. ({time.time() - t0:.3f}s)')
    cv2.imwrite("output.png", img0)


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--weights', nargs='+', type=str, default='yolov5lite_s_best.pt', help='model.pt path(s)')
    parser.add_argument('--img-size', type=int, default=640, help='inference size (pixels)')
    parser.add_argument('--conf-thres', type=float, default=0.4, help='object confidence threshold')
    parser.add_argument('--iou-thres', type=float, default=0.5, help='IOU threshold for NMS')
    parser.add_argument('--device', default='cpu', help='cuda device, i.e. 0 or 0,1,2,3 or cpu')
    opt = parser.parse_args()
    print(opt)
    check_requirements(exclude=('pycocotools', 'thop'))
    with torch.no_grad():  # no gradients needed for inference
        detect()
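Save the script in the YOLOv5-Lite repo root so the models/ and utils/ imports resolve (the filename below is up to you). It reads img_2.png from the working directory and writes the annotated result to output.png:

python my_detect.py --weights ./yolov5lite_s_best.pt --device cpu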
If you need help, see here:
https://docs.qq.com/sheet/DUEdqZ2lmbmR6UVdU?tab=BB08J2