First, convert the YOLO-format .txt labels of the validation set into a COCO-style data.json (adjust the classes list and the default paths to your own dataset):

import os
import cv2
import json
from tqdm import tqdm
import argparse

# modify according to your own dataset's classes
classes = ['car', 'truck', 'bus']

parser = argparse.ArgumentParser()
# path to the validation images
parser.add_argument('--image_path', default='/home/root1/data/miya/yolov8/data/uavdt/val/images', type=str, help="path of images")
# path to the validation labels
parser.add_argument('--label_path', default='/home/root1/data/miya/yolov8/data/uavdt/val/labels', type=str, help="path of labels .txt")
# output path for data.json
parser.add_argument('--save_path', type=str, default='data.json', help="if not split the dataset, give a path to a json file")
arg = parser.parse_args()

def yolo2coco(arg):
    print("Loading data from ", arg.image_path, arg.label_path)
    assert os.path.exists(arg.image_path)
    assert os.path.exists(arg.label_path)
    originImagesDir = arg.image_path
    originLabelsDir = arg.label_path
    # file names in the images dir
    indexes = os.listdir(originImagesDir)
    dataset = {'categories': [], 'annotations': [], 'images': []}
    for i, cls in enumerate(classes, 0):
        dataset['categories'].append({'id': i, 'name': cls, 'supercategory': 'mark'})
    # running annotation id
    ann_id_cnt = 0
    for k, index in enumerate(tqdm(indexes)):
        # supports png and jpg images
        stem = index[:index.rfind(".")]
        txtFile = f'{stem}.txt'
        # read the image width and height
        try:
            im = cv2.imread(os.path.join(originImagesDir, index))
            height, width, _ = im.shape
        except Exception as e:
            print(f'{os.path.join(originImagesDir, index)} read error.\nerror:{e}')
            continue
        # skip images that have no label file
        if not os.path.exists(os.path.join(originLabelsDir, txtFile)):
            continue
        # add the image info
        dataset['images'].append({'file_name': index, 'id': stem, 'width': width, 'height': height})
        with open(os.path.join(originLabelsDir, txtFile), 'r') as fr:
            labelList = fr.readlines()
            for label in labelList:
                label = label.strip().split()
                x = float(label[1])
                y = float(label[2])
                w = float(label[3])
                h = float(label[4])
                # convert normalized cx,cy,w,h to pixel x1,y1,x2,y2
                H, W, _ = im.shape
                x1 = (x - w / 2) * W
                y1 = (y - h / 2) * H
                x2 = (x + w / 2) * W
                y2 = (y + h / 2) * H
                # class ids start from 0 (COCO2017's own numbering is messy; ignore it)
                cls_id = int(label[0])
                box_w = max(0, x2 - x1)
                box_h = max(0, y2 - y1)
                dataset['annotations'].append({
                    'area': box_w * box_h,
                    'bbox': [x1, y1, box_w, box_h],
                    'category_id': cls_id,
                    'id': ann_id_cnt,
                    'image_id': stem,
                    'iscrowd': 0,
                    # mask: the rectangle's four corners, clockwise from the top-left
                    'segmentation': [[x1, y1, x2, y1, x2, y2, x1, y2]]
                })
                ann_id_cnt += 1
    # save the result
    with open(arg.save_path, 'w') as f:
        json.dump(dataset, f)
        print('Save annotation to {}'.format(arg.save_path))

if __name__ == "__main__":
    yolo2coco(arg)
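To verify the conversion, here is a minimal sanity check (my addition, assuming data.json was written to the default save path above) that loads the file back with pycocotools:

from pycocotools.coco import COCO

# load the converted annotations and print basic counts
coco = COCO('data.json')
print(f"images: {len(coco.imgs)}, annotations: {len(coco.anns)}, categories: {len(coco.cats)}")

# inspect one annotation to confirm the bbox is in COCO [x, y, w, h] pixel format
first_ann = next(iter(coco.anns.values()))
print(first_ann['image_id'], first_ann['category_id'], first_ann['bbox'])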
Next, validate the trained RT-DETR weights with save_json=True so that ultralytics writes a predictions.json for the COCO evaluation:

import warnings
warnings.filterwarnings('ignore')
from ultralytics import RTDETR

# load the trained weights
model = RTDETR("/home/root1/data/miya/yolov8/uav/runs/train/rt8/weights/best.pt")
# point data at your dataset yaml file
model.val(data='/home/root1/data/miya/yolov8/data/uavdt/uavdt.yaml',
          split='val',
          imgsz=1024,
          batch=1,
          workers=8,
          device='0',
          # rect=False,
          save_json=True,  # needed to compute the COCO metrics later
          project='runs/val',
          name='exp',
          )
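Each validation run writes its outputs to a fresh folder (runs/val/exp, runs/val/exp2, and so on). A small sketch, assuming the project='runs/val' and name='exp' defaults above, to pick up the newest predictions.json:

import glob
import os

# find the predictions.json from the most recent val run;
# a number is appended to 'exp' when the folder already exists
candidates = glob.glob('runs/val/exp*/predictions.json')
pred_json = max(candidates, key=os.path.getmtime)
print('Using', pred_json)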
The run output is shown below. Since I only wanted to check that the code runs, I used the weights from the first training epoch, which is why the metrics are all very low.
Finally, evaluate predictions.json against data.json with pycocotools, and break down the error types with TIDE (change the default pred_json path to match your own run folder):

import argparse
from pycocotools.coco import COCO
from pycocotools.cocoeval import COCOeval
from tidecv import TIDE, datasets

def parse_opt():
    parser = argparse.ArgumentParser()
    # path to data.json (the ground-truth annotations)
    parser.add_argument('--anno_json', type=str, default='data.json', help='COCO-format annotation json path')
    # path to the predictions.json produced by model.val(save_json=True)
    parser.add_argument('--pred_json', type=str, default='runs/val/exp17/predictions.json', help='predictions json path')
    return parser.parse_known_args()[0]

if __name__ == '__main__':
    opt = parse_opt()
    anno_json = opt.anno_json
    pred_json = opt.pred_json
    anno = COCO(anno_json)  # init annotations api
    pred = anno.loadRes(pred_json)  # init predictions api
    coco_eval = COCOeval(anno, pred, 'bbox')
    coco_eval.evaluate()
    coco_eval.accumulate()
    coco_eval.summarize()
    tide = TIDE()
    tide.evaluate_range(datasets.COCO(anno_json), datasets.COCOResult(pred_json), mode=TIDE.BOX)
    tide.summarize()
    tide.plot(out_dir='result')
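If per-class AP is also of interest, here is a short extension (my addition, not part of the original script; it reuses the anno and pred objects from the __main__ block above) that runs COCOeval once per category:

# per-class AP: restrict COCOeval to one category id at a time
for cat_id, cat in anno.cats.items():
    per_cls = COCOeval(anno, pred, 'bbox')
    per_cls.params.catIds = [cat_id]
    per_cls.evaluate()
    per_cls.accumulate()
    print(f"--- {cat['name']} (id {cat_id}) ---")
    per_cls.summarize()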
Running it, the metrics come out as 0; again, that is because I used the first epoch's weights, whose validation scores were already very low.