
[Stitched Together] Building a Custom COCO-Format Keypoint Detection Dataset and Training HRNet


I. Annotating with labelme to generate JSON files

After annotation, a labelme JSON file looks roughly like this:

  1. "shapes": [
  2. {
  3. "label": "joint", # 目标检测框的类别
  4. "points": [ # 目标检测框的位置:左上以及右下的坐标(X,Y)
  5. [
  6. 767.0000000000005, # 坐标系 左上角为(00)横为x 纵为y
  7. 900.2692307692307
  8. ],
  9. [
  10. 2028.538461538462,
  11. 2388.7307692307695
  12. ]
  13. ],
  14. "group_id": null,
  15. "shape_type": "rectangle",
  16. "flags": {}
  17. },
  18. {
  19. "label": "patella_1", # 关键点类别
  20. "points": [ # 关键点的坐标
  21. [
  22. 1717.0000000000005,
  23. 1257.9615384615383
  24. ]
  25. ],
  26. "group_id": null,
  27. "shape_type": "point",
  28. "flags": {}
  29. }
  30. ]

II. Converting labelme JSON to COCO-format annotations

The scripts are run in the following order:

1. change_name.py

All file names must be renamed to COCO-style 12-digit numbers, zero-padded and starting from 0; otherwise image/annotation matching fails later during evaluation. A minimal sketch follows.
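The post does not include change_name.py itself; this is only a sketch of what it might look like. The folder name dataset_raw and the assumption that every image has a same-named labelme .json are mine.

import os
import glob

src_dir = "dataset_raw"  # hypothetical folder holding the labelme .jpg/.json pairs

# Rename every image (and its matching .json) to a 12-digit, zero-padded index starting at 0
for idx, jpg_path in enumerate(sorted(glob.glob(os.path.join(src_dir, "*.jpg")))):
    new_stem = str(idx).zfill(12)  # 000000000000, 000000000001, ...
    os.rename(jpg_path, os.path.join(src_dir, new_stem + ".jpg"))
    json_path = jpg_path[:-4] + ".json"
    if os.path.exists(json_path):
        os.rename(json_path, os.path.join(src_dir, new_stem + ".json"))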

2. divide_jpg_json.py

Separates the original images and the JSON files into two folders; a sketch is shown below.
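Again, the script itself is not shown in the post; this is a minimal sketch under the assumption that images and JSON files sit together in one flat folder. All folder names are placeholders.

import os
import glob
import shutil

src_dir = "dataset_raw"        # mixed folder with .jpg and .json files
img_dir = "dataset_coco"       # destination for the images
json_dir = "json_file_coco"    # destination for the labelme json files

os.makedirs(img_dir, exist_ok=True)
os.makedirs(json_dir, exist_ok=True)

for path in glob.glob(os.path.join(src_dir, "*")):
    if path.endswith(".jpg"):
        shutil.copy(path, img_dir)
    elif path.endswith(".json"):
        shutil.copy(path, json_dir)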

3. json2coco_1.py

Generates COCO-format annotation files from the labelme JSON.

Explanation of the COCO format:

{
    "info"                       # not really used
    "licenses"                   # not really used
    "images": [                  # one entry per image
        {
            "height": 2845,
            "width": 1832,
            "id": 1,
            "file_name": "000000000185.jpg"
        }
    ],
    "annotations": [             # one entry per box
        {
            "id": 0,             # annotation (box) id
            "image_id": 1,       # id of the image this box belongs to; must match an id in "images"
            "category_id": 1,    # detection category id
            "iscrowd": 0,        # whether the annotation is a crowd region; 0 means a single object
            "area": 1.0,         # area of the box
            "segmentation": null,
            "bbox": [            # box as x, y, width, height
                691.0,
                1089.7,
                986.5,
                1000.2
            ],
            "keypoints": [       # triplets of x, y, v; v=0 not labeled, v=1 labeled but not visible, v=2 labeled and visible
                1424.3,
                1285.35,
                2,
                1404.33,
                1602.5,
                2,
                1197.5,
                1812.5,
                2
            ],
            "num_keypoints": 3   # number of labeled keypoints
        }
    ],
    "categories": [
        {
            "supercategory": "joint",
            "id": 1,             # detection category id
            "name": "joint",     # category name
            "keypoint": [        # keypoint classes
                "patella_1",
                "patella_2",
                "tibia_1"
            ]
        }
    ]
}

The conversion code is below.

Note: area seems to affect the IoU/OKS computation later on. I flagged it first and had not corrected it.

Now fixed:

annotation['area'] = self._get_box(bbox)[2]*self._get_box(bbox)[3]  # w*h

The script also splits the data into training and validation sets automatically:

(I did not use the automatic split myself.)

import os
import sys
import glob
import json
import shutil
import argparse
import numpy as np
import PIL.Image
import os.path as osp
from tqdm import tqdm
from labelme import utils
from sklearn.model_selection import train_test_split


class Labelme2coco_keypoints():
    def __init__(self, args):
        """
        Constructor for converting a labelme keypoint dataset to COCO format.
        Args:
            args: command-line arguments
                - class_name: name of the root (detection) class
        """
        self.classname_to_id = {args.class_name: 1}
        self.images = []
        self.annotations = []
        self.categories = []
        self.ann_id = 0
        self.img_id = 0

    def save_coco_json(self, instance, save_path):
        with open(save_path, 'w', encoding='utf-8') as f:
            json.dump(instance, f, ensure_ascii=False, indent=1)

    def read_jsonfile(self, path):
        with open(path, "r", encoding='utf-8') as f:
            return json.load(f)

    def _get_box(self, points):
        # Convert a list of corner points into [x, y, width, height]
        min_x = min_y = np.inf
        max_x = max_y = 0
        for x, y in points:
            min_x = min(min_x, x)
            min_y = min(min_y, y)
            max_x = max(max_x, x)
            max_y = max(max_y, y)
        return [min_x, min_y, max_x - min_x, max_y - min_y]

    def _get_keypoints(self, points, keypoints, num_keypoints):
        """
        Parse raw labelme points and append a COCO keypoint triplet, e.g.:
        "keypoints": [
            67.06149888292556,   # x value
            122.5043507571318,   # y value
            2,                   # visibility flag: 0 = not labeled, 1 = labeled but not visible, 2 = labeled and visible
            ...
        ]
        """
        if points[0] == 0 and points[1] == 0:
            visable = 0
        else:
            visable = 2
            num_keypoints += 1
        keypoints.extend([points[0], points[1], visable])
        return keypoints, num_keypoints

    def _image(self, obj, path):
        """
        Parse a labelme obj and build a COCO "image" entry
        with 4 fields: id, file_name, height, width. Example:
        {
            "file_name": "training/rgb/00031426.jpg",
            "height": 224,
            "width": 224,
            "id": 31426
        }
        """
        image = {}
        # img_x = utils.img_b64_to_arr(obj['imageData'])  # decode labelme's imageData into an array if height/width are missing
        image['height'] = obj['imageHeight']
        image['width'] = obj['imageWidth']  # image height and width
        # self.img_id = int(os.path.basename(path).split(".json")[0])
        image['id'] = self.img_id
        self.img_id = self.img_id + 1
        image['file_name'] = os.path.basename(path).replace(".json", ".jpg")
        return image

    def _annotation(self, bboxes_list, keypoints_list, json_path):
        """
        Build the COCO annotations.
        Args:
            bboxes_list: rectangle (bounding-box) shapes
            keypoints_list: point shapes
            json_path: path of the labelme json file
        """
        if len(keypoints_list) != args.join_num * len(bboxes_list):
            print('you are missing {} keypoint(s) with file {}'.format(
                args.join_num * len(bboxes_list) - len(keypoints_list), json_path))
            print('Please check !!!')
            sys.exit()
        i = 0
        for box in bboxes_list:
            annotation = {}
            keypoints = []
            num_keypoints = 0
            label = box['label']
            bbox = box['points']
            annotation['id'] = self.ann_id
            annotation['image_id'] = self.img_id - 1
            annotation['category_id'] = int(self.classname_to_id[label])
            annotation['iscrowd'] = 0
            annotation['area'] = self._get_box(bbox)[2] * self._get_box(bbox)[3]  # w * h
            annotation['segmentation'] = None  # [np.asarray(bbox).flatten().tolist()]
            annotation['bbox'] = self._get_box(bbox)

            for keypoint in keypoints_list[i * args.join_num: (i + 1) * args.join_num]:
                point = keypoint['points']
                annotation['keypoints'], num_keypoints = self._get_keypoints(point[0], keypoints, num_keypoints)
            annotation['num_keypoints'] = num_keypoints

            i += 1
            self.ann_id += 1
            self.annotations.append(annotation)

    def _init_categories(self):
        """
        Initialize the COCO categories, e.g.:
        "categories": [
            {
                "supercategory": "hand",
                "id": 1,
                "name": "hand",
                "keypoints": [
                    "wrist",
                    "thumb1",
                    "thumb2",
                    ...
                ],
                "skeleton": []
            }
        ]
        """
        for name, id in self.classname_to_id.items():
            category = {}
            category['supercategory'] = name
            category['id'] = id
            category['name'] = name
            # keypoint names for this dataset (3 joints)
            category['keypoint'] = ['patella_1', 'patella_2', 'tibia_1']
            # category['keypoint'] = [str(i + 1) for i in range(args.join_num)]
            self.categories.append(category)

    def to_coco(self, json_path_list):
        """
        Convert the raw labelme labels into the COCO dataset format (labels and images).
        Args:
            json_path_list: paths of the raw labelme json files
        """
        self._init_categories()
        for json_path in tqdm(json_path_list):
            obj = self.read_jsonfile(json_path)  # parse one labelme annotation file
            self.images.append(self._image(obj, json_path))  # build the image entry
            shapes = obj['shapes']  # read the labelme shape annotations
            bboxes_list, keypoints_list = [], []
            for shape in shapes:
                if shape['shape_type'] == 'rectangle':
                    bboxes_list.append(shape)       # bboxes
                elif shape['shape_type'] == 'point':
                    keypoints_list.append(shape)    # keypoints
            self._annotation(bboxes_list, keypoints_list, json_path)

        keypoints = {}
        keypoints['images'] = self.images
        keypoints['annotations'] = self.annotations
        keypoints['categories'] = self.categories
        return keypoints


def init_dir(base_path):
    """
    Initialize the COCO folder structure:
    coco - annotations  # annotation files
         - train        # training images
         - val          # validation images
    Args:
        base_path: root path where the dataset is placed
    """
    if not os.path.exists(os.path.join(base_path, "coco", "annotations")):
        os.makedirs(os.path.join(base_path, "coco", "annotations"))
    if not os.path.exists(os.path.join(base_path, "coco", "train")):
        os.makedirs(os.path.join(base_path, "coco", "train"))
    if not os.path.exists(os.path.join(base_path, "coco", "val")):
        os.makedirs(os.path.join(base_path, "coco", "val"))


if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--class_name", "--n", help="object class name", default='joint', type=str)
    parser.add_argument("--input", "--i", help="json file path (jsonfiles)", default='/home/zlhaha/WJ/joints_project/yolov8-pose/Project/dataset_raw/COCO/json_file_coco', type=str)
    parser.add_argument("--output", "--o", help="output file path (coco format)", default='/home/zlhaha/WJ/joints_project/deep-high-resolution-net.pytorch-master/data', type=str)
    parser.add_argument("--join_num", "--j", help="number of join", default=3, type=int)
    parser.add_argument("--ratio", "--r", help="train and test split ratio", type=float, default=0.1)
    args = parser.parse_args()

    labelme_path = args.input
    saved_coco_path = args.output

    init_dir(saved_coco_path)  # initialize the COCO folder structure
    json_list_path = glob.glob(labelme_path + "/*.json")
    train_path, val_path = train_test_split(json_list_path, test_size=args.ratio)
    print('{} for training'.format(len(train_path)),
          '\n{} for testing'.format(len(val_path)))
    print('Start transform please wait ...')

    l2c_train = Labelme2coco_keypoints(args)  # build the dataset converter
    # generate the training set
    train_keypoints = l2c_train.to_coco(train_path)
    l2c_train.save_coco_json(train_keypoints, os.path.join(saved_coco_path, "coco", "annotations", "keypoints_train.json"))

    # generate the validation set
    l2c_val = Labelme2coco_keypoints(args)
    val_instance = l2c_val.to_coco(val_path)
    l2c_val.save_coco_json(val_instance, os.path.join(saved_coco_path, "coco", "annotations", "keypoints_val.json"))

    image_root_path = '/home/zlhaha/WJ/joints_project/yolov8-pose/Project/dataset_raw/COCO/dataset_coco'
    # copy the original labelme images into the train and val folders
    for file in train_path:
        img = os.path.join(image_root_path, file.split('/')[-1].replace("json", "jpg"))
        shutil.copy(img, os.path.join(saved_coco_path, "coco", "train"))
    for file in val_path:
        img = os.path.join(image_root_path, file.split('/')[-1].replace("json", "jpg"))
        shutil.copy(img, os.path.join(saved_coco_path, "coco", "val"))
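Once the script has run, a quick sanity check with pycocotools confirms that the generated annotation file parses and the counts look right. This is a minimal sketch of my own; the path assumes the output layout created above.

from pycocotools.coco import COCO

ann_file = "coco/annotations/keypoints_train.json"  # produced by the script above

coco = COCO(ann_file)  # raises if the JSON structure is malformed
print("images:", len(coco.getImgIds()))
print("annotations:", len(coco.getAnnIds()))
print("categories:", coco.loadCats(coco.getCatIds()))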

III. Modifying the code to train HRNet

https://arxiv.org/abs/1904.04514

1. Put the data into the data directory, following the layout produced by the conversion script: coco/annotations, coco/train, and coco/val.

2. Change the number of keypoints in lib/dataset/coco.py (mine is 3):

self.num_joints = 3

3. Important: in cocoeval.py, change self.kpt_oks_sigmas (around line 523) to match your own dataset; mine has 3 keypoints. Otherwise evaluation raises an error.

The sigmas here must also match your own number of keypoints; they act as per-joint weights in the OKS computation. A sketch of the edit follows.
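For reference, a sketch of the change inside the Params class of pycocotools' cocoeval.py (the stock array has 17 entries for the COCO person keypoints; the three values below are placeholders I chose, not tuned constants):

# pycocotools cocoeval.py, Params.setKpParams (around line 523 as noted above):
# one sigma per keypoint of the custom dataset (3 here);
# a larger sigma means looser OKS scoring for that joint.
self.kpt_oks_sigmas = np.array([0.79, 0.79, 0.79]) / 10.0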

4. Replace np.float with float; this is needed because of the NumPy version.
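np.float was deprecated in NumPy 1.20 and removed in 1.24, so such lines fail with AttributeError: module 'numpy' has no attribute 'float'. A representative fix (the exact lines in the repo may differ):

# before
joints_3d = np.zeros((self.num_joints, 3), dtype=np.float)
# after
joints_3d = np.zeros((self.num_joints, 3), dtype=float)  # np.float64 / np.float32 also work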

5. Turn FLIP off in the yaml config. I do not need it, and leaving it on caused errors for me.

Set both variables, FLIP_TEST and FLIP, to False.

6. Modify plot_coco.py

Change the color and related variables near the top to match your own number of keypoints (3 in my case).

Comment out the stick (skeleton line) drawing; my task does not need connecting lines between keypoints.

IV. HRNet notes

1. Fixing path issues: I debug in PyCharm, while the GitHub code is meant to be run from the terminal, so the base path differs and needs adjusting. Right-click the base folder, choose Mark Directory as Sources Root, and then fix up the imports (adding the lib. prefix where needed).

2. In the training loop, for i, (input, target, target_weight, meta) in enumerate(train_loader): with the w32_384x288_adam_lr1e-3 config, input is 288×384, the network output is 72×96 heatmaps, and target is also 72×96; target_weight marks whether each keypoint is visible, and meta is a dict of bookkeeping data that is not really needed. A shape-annotated sketch follows the config excerpt below.

IMAGE_SIZE:
- 288
- 384
HEATMAP_SIZE:
- 72
- 96
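A minimal sketch of what these shapes look like inside the loop (batch size and the printed meta contents are illustrative; the tensors are laid out height × width, so 384×288 for the input and 96×72 for the heatmaps):

# Illustrative only: inspect one batch from the HRNet train_loader
for i, (input, target, target_weight, meta) in enumerate(train_loader):
    print(input.shape)          # e.g. [32, 3, 384, 288]        - normalized image crops
    print(target.shape)         # e.g. [32, num_joints, 96, 72] - one Gaussian heatmap per keypoint
    print(target_weight.shape)  # e.g. [32, num_joints, 1]      - 1 for labeled/visible joints, 0 otherwise
    print(meta.keys())          # bookkeeping dict (image path, center, scale, joint coords, ...)
    break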