
Training YOLOv8 on a Custom Dataset

1. Preparing the Dataset

1.1 Converting VOC Data to YOLO Format

VOC format

The file hierarchy is as follows:

VOCdevkit
└── VOC2007
    ├── Annotations      # XML annotation files
    ├── ImageSets
    │   └── Main         # train.txt / val.txt split lists
    └── JPEGImages       # source images
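
The conversion script below expects the split lists ImageSets/Main/train.txt and ImageSets/Main/val.txt to exist. If they have not been prepared yet, a rough sketch along these lines can generate them from the image file names (the VOCdevkit location, the 90/10 split ratio and the fixed seed are illustrative assumptions, not values from the original setup):

import random
from pathlib import Path

# Build ImageSets/Main/train.txt and val.txt from the images in JPEGImages.
# The root path and the 90/10 split below are illustrative assumptions.
voc_root = Path("VOCdevkit/VOC2007")
main_dir = voc_root / "ImageSets" / "Main"
main_dir.mkdir(parents=True, exist_ok=True)

image_ids = sorted(p.stem for p in (voc_root / "JPEGImages").glob("*.jpg"))
random.seed(0)            # fixed seed so the split is reproducible
random.shuffle(image_ids)

split = int(len(image_ids) * 0.9)
(main_dir / "train.txt").write_text("\n".join(image_ids[:split]) + "\n")
(main_dir / "val.txt").write_text("\n".join(image_ids[split:]) + "\n")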

YOLO format conversion script:

# voc_to_yolo.py
from tqdm import tqdm
import shutil
from pathlib import Path
import xml.etree.ElementTree as ET


def convert_label(path, lb_path, year, image_id, names):
    def convert_box(size, box):
        dw, dh = 1. / size[0], 1. / size[1]
        x, y, w, h = (box[0] + box[1]) / 2.0 - 1, (box[2] + box[3]) / 2.0 - 1, box[1] - box[0], box[3] - box[2]
        return x * dw, y * dh, w * dw, h * dh

    in_file = open(path / f'VOC{year}/Annotations/{image_id}.xml')
    out_file = open(lb_path, 'w')
    tree = ET.parse(in_file)
    root = tree.getroot()
    size = root.find('size')
    w = int(size.find('width').text)
    h = int(size.find('height').text)

    for obj in root.iter('object'):
        cls = obj.find('name').text
        if cls in names:
            xmlbox = obj.find('bndbox')
            bb = convert_box((w, h), [float(xmlbox.find(x).text) for x in ('xmin', 'xmax', 'ymin', 'ymax')])
            cls_id = names.index(cls)  # class id
            out_file.write(" ".join(str(a) for a in (cls_id, *bb)) + '\n')
        else:
            print("category error: ", cls)


year = "2007"
image_sets = ["train", "val"]
path = Path("H:\\work\\daodan_move\\ultralytics-main\\ultralytics\\datasets\\VOCdevkit\\")
class_names = ["call", "dislike", "fist", "four", "like", "mute", "ok", "one", "palm",
               "1", "2", "3", "4", "5", "6", "7", "8", "9", "10"]

for image_set in image_sets:
    imgs_path = path / 'images' / f'{image_set}'
    lbs_path = path / 'labels' / f'{image_set}'
    imgs_path.mkdir(exist_ok=True, parents=True)
    lbs_path.mkdir(exist_ok=True, parents=True)

    with open(path / f'VOC{year}/ImageSets/Main/{image_set}.txt') as f:
        image_ids = f.read().strip().split()

    for id in tqdm(image_ids, desc=f'{image_set}'):
        f = path / f'VOC{year}/JPEGImages/{id}.jpg'  # old img path
        lb_path = (lbs_path / f.name).with_suffix('.txt')  # new label path
        # f.rename(imgs_path / f.name)  # move image
        shutil.copyfile(f, imgs_path / f.name)  # copy image
        convert_label(path, lb_path, year, id, class_names)  # convert labels to YOLO format
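
Each line of the generated .txt label files follows the YOLO convention: class_id x_center y_center width height, with all four coordinates normalized to [0, 1]. A quick spot-check of one converted file (the directory below mirrors the labels/train folder created by the script and is only an example path):

from pathlib import Path

# Print the contents of one converted label file and flag values outside [0, 1].
# The path is an illustrative example matching the layout created above.
label_file = next(Path("VOCdevkit/labels/train").glob("*.txt"))
print(label_file.name)
for line in label_file.read_text().splitlines():
    cls_id, xc, yc, w, h = line.split()
    print(f"class {cls_id}: center=({xc}, {yc}) size=({w}, {h})")
    if not all(0.0 <= float(v) <= 1.0 for v in (xc, yc, w, h)):
        print("  -> warning: value outside [0, 1], check the source annotation")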

1.2 Converting COCO Format to YOLO Format (conversion script)

Target directory layout for the converted dataset:

VOCdevkit
├── images
│   ├── train   # training-set images
│   └── val     # validation-set images
└── labels
    ├── train   # training-set label files
    └── val     # validation-set label files

import json
import os
import shutil
from tqdm import tqdm

coco_path = "F:/datasets/Apple_Detection_Swift-YOLO_192"
output_path = "F:/vsCode/ultralytics/datasets/Apple"

os.makedirs(os.path.join(output_path, "images", "train"), exist_ok=True)
os.makedirs(os.path.join(output_path, "images", "val"), exist_ok=True)
os.makedirs(os.path.join(output_path, "labels", "train"), exist_ok=True)
os.makedirs(os.path.join(output_path, "labels", "val"), exist_ok=True)

with open(os.path.join(coco_path, "train", "_annotations.coco.json"), "r") as f:
    train_annotations = json.load(f)
with open(os.path.join(coco_path, "valid", "_annotations.coco.json"), "r") as f:
    val_annotations = json.load(f)

# Iterate over the training images
for image in tqdm(train_annotations["images"]):
    width, height = image["width"], image["height"]
    scale_x = 1.0 / width
    scale_y = 1.0 / height
    label = ""
    for annotation in train_annotations["annotations"]:
        if annotation["image_id"] == image["id"]:
            # Convert the annotation to YOLO format
            x, y, w, h = annotation["bbox"]
            x_center = x + w / 2.0
            y_center = y + h / 2.0
            x_center *= scale_x
            y_center *= scale_y
            w *= scale_x
            h *= scale_y
            class_id = annotation["category_id"]
            label += "{} {} {} {} {}\n".format(class_id, x_center, y_center, w, h)
    # Save the image and label
    shutil.copy(os.path.join(coco_path, "train", image["file_name"]),
                os.path.join(output_path, "images", "train", image["file_name"]))
    with open(os.path.join(output_path, "labels", "train", image["file_name"].replace(".jpg", ".txt")), "w") as f:
        f.write(label)

# Iterate over the validation images
for image in tqdm(val_annotations["images"]):
    width, height = image["width"], image["height"]
    scale_x = 1.0 / width
    scale_y = 1.0 / height
    label = ""
    for annotation in val_annotations["annotations"]:
        if annotation["image_id"] == image["id"]:
            # Convert the annotation to YOLO format
            x, y, w, h = annotation["bbox"]
            x_center = x + w / 2.0
            y_center = y + h / 2.0
            x_center *= scale_x
            y_center *= scale_y
            w *= scale_x
            h *= scale_y
            class_id = annotation["category_id"]
            label += "{} {} {} {} {}\n".format(class_id, x_center, y_center, w, h)
    # Save the image and label
    shutil.copy(os.path.join(coco_path, "valid", image["file_name"]),
                os.path.join(output_path, "images", "val", image["file_name"]))
    with open(os.path.join(output_path, "labels", "val", image["file_name"].replace(".jpg", ".txt")), "w") as f:
        f.write(label)
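
YOLO label files use 0-based class indices that must match the order of the names list in the dataset yaml, while COCO exports often number categories from 1 (and some reserve id 0 for a background or super-category). Whether the category_id written by the script above needs an offset therefore depends on the particular export; a small inspection of the source json makes this easy to decide (the path is the same train annotation file used above):

import json
import os

# Inspect the category ids in the source COCO annotations to decide whether the
# class ids written above need to be shifted to start at 0.
coco_path = "F:/datasets/Apple_Detection_Swift-YOLO_192"
with open(os.path.join(coco_path, "train", "_annotations.coco.json"), "r") as f:
    coco = json.load(f)

for cat in sorted(coco["categories"], key=lambda c: c["id"]):
    print(cat["id"], cat["name"])

min_id = min(c["id"] for c in coco["categories"])
if min_id != 0:
    print(f"smallest category id is {min_id}; remap the ids (or order the yaml names accordingly) so classes start at 0")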

2. Training

Copy voc.yaml to voc_self.yaml, then modify it as follows:

# Ultralytics YOLO
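
For reference, an Ultralytics dataset yaml defines the dataset root (path), the relative train and val image directories, and the class names list; voc_self.yaml should point these at the images/train and images/val folders and the class list prepared in section 1. Once the yaml is in place, training can be launched with the Ultralytics Python API. The model size and hyper-parameters below are illustrative defaults, not values from this article:

from ultralytics import YOLO

# Minimal training sketch: yolov8n.pt, epochs, imgsz and batch are example
# choices, not settings taken from the original write-up.
model = YOLO("yolov8n.pt")          # start from a pretrained YOLOv8-nano checkpoint
model.train(
    data="voc_self.yaml",           # the dataset yaml edited above
    epochs=100,
    imgsz=640,
    batch=16,
)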