
Training YOLOv8 Instance Segmentation on Your Own Dataset


Reposted from: https://blog.csdn.net/m0_51530640/article/details/129975257

1. Annotating the data with Labelme

1.1 Installing Labelme

First install Anaconda, then run the following commands:

###### ###### ######
## for Python 2 ##
###### ###### ######
conda create --name=labelme python=2.7
source activate labelme
# conda install -c conda-forge pyside2
conda install pyqt
pip install labelme
# To install the latest version instead, use:
# pip install git+https://github.com/wkentaro/labelme.git

###### ###### ######
## for Python 3 ##
###### ###### ######
conda create --name=labelme python=3.6
source activate labelme
# conda install -c conda-forge pyside2
# conda install pyqt
pip install pyqt5  # pyqt5 can be installed via pip on python3
pip install labelme

Then launch it with:

labelme

1.2 Labelme usage tutorial

For a tutorial on using Labelme for segmentation annotation, see: labelme.
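
For reference, the conversion script in section 2.1 only reads a few fields from each Labelme JSON file: imageHeight, imageWidth, and each shape's label and points. A minimal sketch of that structure as it appears after json.load, with hypothetical file names, labels, and coordinates:

# Rough structure of a Labelme annotation after json.load (hypothetical values)
json_dict = {
    "imagePath": "example.jpg",
    "imageHeight": 480,
    "imageWidth": 640,
    "shapes": [
        {
            "label": "ccc",  # class name; must appear in the --classes list passed to json2txt.py
            "shape_type": "polygon",
            "points": [[120.0, 80.0], [200.0, 85.0], [190.0, 160.0]],  # polygon vertices in pixels
        },
    ],
}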

2. Converting and splitting the dataset

Convert and split the dataset. Note: when annotating, keep the images and the JSON files in separate folders, as shown in the figure below, and create two additional folders named txt and split.

(Figure: dataset folder layout)
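
The original screenshot is not reproduced here; based on the default paths used in the scripts below, the assumed layout is roughly:

data/          # e.g. D:/ultralytics-main/data
|-- *.jpg      # annotated images
|-- json/      # Labelme .json files
|-- txt/       # will receive the converted .txt labels (section 2.1)
`-- split/     # will receive the train/val/test split (section 2.2)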

2.1 Converting JSON files to TXT format

Create a json2txt.py file and change the file paths to your own:

# -*- coding: utf-8 -*-
import json
import os
import argparse

from tqdm import tqdm


def convert_label_json(json_dir, save_dir, classes):
    json_paths = os.listdir(json_dir)
    classes = classes.split(',')

    for json_path in tqdm(json_paths):
        path = os.path.join(json_dir, json_path)
        with open(path, 'r') as load_f:
            json_dict = json.load(load_f)
        h, w = json_dict['imageHeight'], json_dict['imageWidth']

        # save txt path
        txt_path = os.path.join(save_dir, json_path.replace('json', 'txt'))
        txt_file = open(txt_path, 'w')

        for shape_dict in json_dict['shapes']:
            label = shape_dict['label']
            label_index = classes.index(label)
            points = shape_dict['points']

            points_nor_list = []
            for point in points:
                points_nor_list.append(point[0] / w)
                points_nor_list.append(point[1] / h)

            points_nor_list = list(map(lambda x: str(x), points_nor_list))
            points_nor_str = ' '.join(points_nor_list)

            label_str = str(label_index) + ' ' + points_nor_str + '\n'
            txt_file.writelines(label_str)


if __name__ == "__main__":
    """
    python json2txt_nomalize.py --json-dir my_datasets/color_rings/jsons --save-dir my_datasets/color_rings/txts --classes "cat,dogs"
    """
    parser = argparse.ArgumentParser(description='json convert to txt params')
    parser.add_argument('--json-dir', type=str, default='D:/ultralytics-main/data/json', help='json path dir')
    parser.add_argument('--save-dir', type=str, default='D:/ultralytics-main/data/txt', help='txt save dir')
    parser.add_argument('--classes', type=str, default='ccc,ccc1', help='classes')
    args = parser.parse_args()

    json_dir = args.json_dir
    save_dir = args.save_dir
    classes = args.classes
    convert_label_json(json_dir, save_dir, classes)
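
Each line the script writes follows the YOLO segmentation label format: a class index followed by the polygon's x y coordinates normalized by the image width and height. For the hypothetical three-point polygon shown earlier, the resulting line would look like:

0 0.1875 0.1667 0.3125 0.1771 0.2969 0.3333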

2.2 Splitting the dataset

Create a split.py file and change the file paths to your own:

# Split the images and label files into train/val/test sets by ratio
import shutil
import random
import os
import argparse


# Create the folder if it does not exist
def mkdir(path):
    if not os.path.exists(path):
        os.makedirs(path)


def main(image_dir, txt_dir, save_dir):
    # Create the output folders
    mkdir(save_dir)
    images_dir = os.path.join(save_dir, 'images')
    labels_dir = os.path.join(save_dir, 'labels')

    img_train_path = os.path.join(images_dir, 'train')
    img_test_path = os.path.join(images_dir, 'test')
    img_val_path = os.path.join(images_dir, 'val')

    label_train_path = os.path.join(labels_dir, 'train')
    label_test_path = os.path.join(labels_dir, 'test')
    label_val_path = os.path.join(labels_dir, 'val')

    mkdir(images_dir)
    mkdir(labels_dir)
    mkdir(img_train_path)
    mkdir(img_test_path)
    mkdir(img_val_path)
    mkdir(label_train_path)
    mkdir(label_test_path)
    mkdir(label_val_path)

    # Split ratios: 80% train, 10% val, 10% test; adjust as needed
    train_percent = 0.8
    val_percent = 0.1
    test_percent = 0.1

    total_txt = os.listdir(txt_dir)
    num_txt = len(total_txt)
    list_all_txt = range(num_txt)  # range(0, num)

    num_train = int(num_txt * train_percent)
    num_val = int(num_txt * val_percent)
    num_test = num_txt - num_train - num_val

    train = random.sample(list_all_txt, num_train)
    # Remove the training indices from the full set
    val_test = [i for i in list_all_txt if not i in train]
    # Sample num_val indices from val_test; the remaining indices become the test set
    val = random.sample(val_test, num_val)

    print("Training set: {}, validation set: {}, test set: {}".format(len(train), len(val), len(val_test) - len(val)))

    for i in list_all_txt:
        name = total_txt[i][:-4]

        srcImage = os.path.join(image_dir, name + '.jpg')
        srcLabel = os.path.join(txt_dir, name + '.txt')

        if i in train:
            dst_train_Image = os.path.join(img_train_path, name + '.jpg')
            dst_train_Label = os.path.join(label_train_path, name + '.txt')
            shutil.copyfile(srcImage, dst_train_Image)
            shutil.copyfile(srcLabel, dst_train_Label)
        elif i in val:
            dst_val_Image = os.path.join(img_val_path, name + '.jpg')
            dst_val_Label = os.path.join(label_val_path, name + '.txt')
            shutil.copyfile(srcImage, dst_val_Image)
            shutil.copyfile(srcLabel, dst_val_Label)
        else:
            dst_test_Image = os.path.join(img_test_path, name + '.jpg')
            dst_test_Label = os.path.join(label_test_path, name + '.txt')
            shutil.copyfile(srcImage, dst_test_Image)
            shutil.copyfile(srcLabel, dst_test_Label)


if __name__ == '__main__':
    """
    python split_datasets.py --image-dir my_datasets/color_rings/imgs --txt-dir my_datasets/color_rings/txts --save-dir my_datasets/color_rings/train_data
    """
    parser = argparse.ArgumentParser(description='split datasets to train,val,test params')
    parser.add_argument('--image-dir', type=str, default='D:/ultralytics-main/data', help='image path dir')
    parser.add_argument('--txt-dir', type=str, default='D:/ultralytics-main/data/txt', help='txt path dir')
    parser.add_argument('--save-dir', default='D:/ultralytics-main/data/split', type=str, help='save dir')
    args = parser.parse_args()

    image_dir = args.image_dir
    txt_dir = args.txt_dir
    save_dir = args.save_dir
    main(image_dir, txt_dir, save_dir)
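
Note that the split is random (random.sample is used without a fixed seed), so re-running the script produces a different partition; call random.seed() with a fixed value beforehand if you need a reproducible split.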

After running the script you get the following files:

(Figure: the split folder containing images/train, images/val, images/test and labels/train, labels/val, labels/test)

3. Training setup

3.1 Create a seg.yaml file in the format below. I usually use absolute paths, which is more convenient.

train: D:\ultralytics-main\data\split\images\train  # train images (relative to 'path') 128 images
val: D:\ultralytics-main\data\split\images\val  # val images (relative to 'path') 128 images
test: D:\ultralytics-main\data\split\images\test  # test images (optional)

# Classes
names:
  0: ccc
  1: ccc1
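
Note that the indices under names must match the order of the class names passed to json2txt.py via --classes, since that script assigns each label the index of its position in the comma-separated list.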

3.2 Training parameter settings

task: segment  # YOLO task, i.e. detect, segment, classify, pose
mode: train  # YOLO mode, i.e. train, val, predict, export, track, benchmark

# Train settings -------------------------------------------------------------------------------------------------------
model: yolov8s-seg.yaml  # path to model file, i.e. yolov8n.pt, yolov8n.yaml
# model: runs/detect/yolov8s/weights/best.pt
data: seg.yaml  # path to data file, i.e. coco128.yaml
epochs: 10  # number of epochs to train for
patience: 50  # epochs to wait for no observable improvement for early stopping of training
batch: 16  # number of images per batch (-1 for AutoBatch)

Then start training.
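
The original post does not show the exact launch command. As one option, here is a minimal sketch using the Ultralytics Python API (assuming the ultralytics package is installed and seg.yaml is in the working directory):

from ultralytics import YOLO

# Build the segmentation model from its config; a pretrained checkpoint such as 'yolov8s-seg.pt' also works
model = YOLO('yolov8s-seg.yaml')

# Train with the dataset file and the settings listed in the config above
model.train(data='seg.yaml', epochs=10, batch=16, patience=50)

Equivalently, the command-line interface can be used, e.g. yolo segment train data=seg.yaml model=yolov8s-seg.yaml epochs=10 batch=16.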

References:

数据标注软件labelme详解 (Labelme data annotation tool explained), 黑暗星球的博客, CSDN

YOLOv5-7.0实例分割训练自己的数据,切分mask图并摆正 (YOLOv5-7.0 instance segmentation on your own data, cropping and straightening the mask images), jin__9981的博客, CSDN
