
Automatic Annotation Based on YOLOv8-pose


Parsing the JSON File

{
    "version": "5.1.1",
    "flags": {},
    "shapes": [                              # a list of nested dictionaries
        {
            "label": "person",
            "points": [                      # two corner points of the bounding box
                [
                    1184.0,
                    145.0
                ],
                [
                    1563.0,
                    743.0
                ]
            ],
            "group_id": 1,                   # ties a box and its keypoints together
            "shape_type": "rectangle",
            "flags": {}
        },
        {
            "label": "0",
            "points": [
                [
                    1519.164794921875,
                    211.12527465820312
                ]
            ],
            "group_id": 1,
            "shape_type": "point",           # "rectangle" for boxes, "point" for keypoints
            "flags": {}
        },
        ......
        {
            "label": "16",
            "points": [
                [
                    1305.552734375,
                    662.0692138671875
                ]
            ],
            "group_id": 1,
            "shape_type": "point",
            "flags": {}
        }
    ],
    "imagePath": "22112622_003270.jpg",      # image file name
    "imageData": ".......",                  # base64-encoded image, the most important field
    "imageHeight": 1080,                     # image height
    "imageWidth": 1920                       # image width
}
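The layout above is the labelme-style annotation JSON, one file per image. As a quick check of the structure, here is a minimal sketch that loads such a file with Python's json module and lists its shapes; the file name is taken from the example above and is only illustrative:

import json

# illustrative file name, matching the imagePath in the example above
with open("22112622_003270.json", "r") as f:
    ann = json.load(f)

# one "rectangle" shape per person box, one "point" shape per keypoint
for shape in ann["shapes"]:
    print(shape["label"], shape["shape_type"], shape["group_id"], shape["points"])

print(ann["imagePath"], ann["imageWidth"], ann["imageHeight"])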

With the JSON format parsed, the next step is to work out how to pull the values we need out of YOLOv8-pose.

Extracting the Corresponding Information from YOLOv8-pose

def postprocess(self, preds, img, orig_imgs):
    """Return detection results for a given input image or list of images."""
    preds = ops.non_max_suppression(preds,
                                    self.args.conf,
                                    self.args.iou,
                                    agnostic=self.args.agnostic_nms,
                                    max_det=self.args.max_det,
                                    classes=self.args.classes,
                                    nc=len(self.model.names))
    if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
        orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
    torch.set_printoptions(sci_mode=False)
    results = []
    for i, pred in enumerate(preds):
        orig_img = orig_imgs[i]
        # pred carries everything for each detection; scale the boxes back to the original image
        pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape).round()
        # the corresponding keypoint coordinates
        pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
        pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
        img_path = self.batch[0][i]
        results.append(
            Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts))
    return results

The code above lives in ultralytics/ultralytics/models/yolo/pose/predict.py.
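A note on the tensor layout used there: after non_max_suppression, each row of pred is [x1, y1, x2, y2, conf, cls, kpt_1_x, kpt_1_y, kpt_1_conf, ...], and self.model.kpt_shape is (17, 3) for the COCO-pretrained pose model, which is why the code slices pred[:, :6] for the boxes and reshapes pred[:, 6:] into keypoints. A minimal sketch, assuming pred and pred_kpts from one iteration of the loop above:

boxes = pred[:, :4]            # x1, y1, x2, y2 in original-image pixels
scores = pred[:, 4]            # box confidence
classes = pred[:, 5]           # class id (0 = "person" for the pose model)
kpts_xy = pred_kpts[..., :2]   # keypoint coordinates, shape (N, 17, 2)
kpts_conf = pred_kpts[..., 2]  # per-keypoint confidence, shape (N, 17)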

YOLOv8-pose Skeleton Keypoint Diagram

You can adapt these skeleton keypoints to whatever skeleton definition your own task needs; the standard ordering used by the pretrained model is listed below for reference.
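For the COCO-pretrained YOLOv8-pose models, the keypoint labels "0" to "16" written into the JSON follow the standard COCO ordering. A small reference mapping (a sketch for documentation only, not part of the original code):

# standard COCO keypoint order used by the pretrained YOLOv8-pose models
COCO_KEYPOINTS = {
    0: "nose",
    1: "left_eye", 2: "right_eye",
    3: "left_ear", 4: "right_ear",
    5: "left_shoulder", 6: "right_shoulder",
    7: "left_elbow", 8: "right_elbow",
    9: "left_wrist", 10: "right_wrist",
    11: "left_hip", 12: "right_hip",
    13: "left_knee", 14: "right_knee",
    15: "left_ankle", 16: "right_ankle",
}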

Automating the Annotation

Each image gets one JSON file of its own. The later parts differ from image to image, but the beginning is always the same, so the rough skeleton of the file can be built first.

# encode the raw bytes of the image file into base64 bytes
base64_bytes = base64.b64encode(byte_content)
# decode the base64 bytes into a utf-8 string
base64_string = base64_bytes.decode('utf-8')
# create the skeleton dictionary for the JSON file
data = {
    "version": "5.1.1",
    "flags": {},
    "shapes": [
    ]
}
json_data = json.dumps(data, indent=4)
# img_path exists in the source code; it is the absolute path of the image
# it is used both for imagePath and to keep the saved JSON file name identical to the image name
imgName = img_path.split("/")[-1]
savePath = "/home/test/fall/" + imgName.split(".")[0] + ".json"
# write the dictionary above into the JSON file we just created
with open(savePath, 'w') as file:
    file.write(json_data)

Generating base64_string borrows from this blog post: https://blog.csdn.net/nodototao/article/details/123800645?spm=1001.2101.3001.6650.2&utm_medium=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-2.pc_relevant_default&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2~default~CTRLIST~default-2.pc_relevant_default&utm_relevant_index=5
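If you want to confirm that imageData is nothing more than the base64-encoded bytes of the original image file, a small round-trip check (the file name below is illustrative):

import base64

# read and encode the image file exactly as in the snippet above
with open("22112622_003270.jpg", "rb") as jpg_file:  # illustrative path
    byte_content = jpg_file.read()
base64_string = base64.b64encode(byte_content).decode("utf-8")

# decoding the string must reproduce the original bytes
assert base64.b64decode(base64_string) == byte_content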

Next, the keypoint and bounding-box information is written into the JSON file.

for j in range(pred.shape[0]):
    # read the JSON file back in, append the new shapes, then write it out again
    with open(savePath, 'r') as f:
        data = json.load(f)
    # insert a new dictionary into the "shapes" list: the bounding box of person j,
    # with group_id tying the box to its keypoints
    bbox = {"label": "person", 'points': [[float(pred[j][0].item()), float(pred[j][1].item())],
                                          [float(pred[j][2].item()), float(pred[j][3].item())]],
            "group_id": (j + 1),
            "shape_type": "rectangle", "flags": {}}
    data['shapes'].append(bbox)
    data.update({"imagePath": imgName})
    data.update({"imageData": base64_string})
    # data.update({"imageData": str(base64.b64encode(open(img_path, "rb").read()))})
    data.update({"imageHeight": orig_img.shape[0]})
    data.update({"imageWidth": orig_img.shape[1]})
    for i in range(17):
        # only keep keypoints whose confidence is above 0.5
        if float(pred_kpts[j][i][2].item()) > 0.5:
            # print(float(pred_kpts[:, i, 0]), float(pred_kpts[:, i, 1]))
            keypoints = {'label': str(i),
                         'points': [[float(pred_kpts[j][i][0].item()), float(pred_kpts[j][i][1].item())]],
                         'group_id': (j + 1), 'shape_type': 'point', 'flags': {}}
            data['shapes'].append(keypoints)
    # write the updated data back to the JSON file
    with open(savePath, 'w') as f:
        json.dump(data, f, indent=4)
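To sanity-check the result, the generated file can be reloaded and the shapes counted per group_id: one rectangle per person and at most 17 points, since low-confidence keypoints are skipped. A minimal sketch, assuming savePath from the snippet above:

import json
from collections import defaultdict

with open(savePath, 'r') as f:
    data = json.load(f)

counts = defaultdict(lambda: {"rectangle": 0, "point": 0})
for shape in data["shapes"]:
    counts[shape["group_id"]][shape["shape_type"]] += 1

for gid, c in sorted(counts.items()):
    print(f"group {gid}: {c['rectangle']} box, {c['point']} keypoints")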

Complete Code

def postprocess(self, preds, img, orig_imgs):
    """Return detection results for a given input image or list of images."""
    preds = ops.non_max_suppression(preds,
                                    self.args.conf,
                                    self.args.iou,
                                    agnostic=self.args.agnostic_nms,
                                    max_det=self.args.max_det,
                                    classes=self.args.classes,
                                    nc=len(self.model.names))
    if not isinstance(orig_imgs, list):  # input images are a torch.Tensor, not a list
        orig_imgs = ops.convert_torch2numpy_batch(orig_imgs)
    torch.set_printoptions(sci_mode=False)
    results = []
    for i, pred in enumerate(preds):
        orig_img = orig_imgs[i]
        pred[:, :4] = ops.scale_boxes(img.shape[2:], pred[:, :4], orig_img.shape).round()
        pred_kpts = pred[:, 6:].view(len(pred), *self.model.kpt_shape) if len(pred) else pred[:, 6:]
        pred_kpts = ops.scale_coords(img.shape[2:], pred_kpts, orig_img.shape)
        img_path = self.batch[0][i]
        with open(img_path, 'rb') as jpg_file:
            byte_content = jpg_file.read()
        # encode the raw bytes of the image file into base64 bytes
        base64_bytes = base64.b64encode(byte_content)
        # decode the base64 bytes into a utf-8 string
        base64_string = base64_bytes.decode('utf-8')
        import json  # (normally placed at the top of the file)
        # create the skeleton dictionary for the JSON file
        data = {
            "version": "5.1.1",
            "flags": {},
            "shapes": [
            ]
        }
        json_data = json.dumps(data, indent=4)
        imgName = img_path.split("/")[-1]
        print(img_path)
        savePath = "/home/ebo/test/high/_merge/" + imgName.split(".")[0] + ".json"
        with open(savePath, 'w') as file:
            file.write(json_data)
        for j in range(pred.shape[0]):
            # read the JSON file back in, append the new shapes, then write it out again
            with open(savePath, 'r') as f:
                data = json.load(f)
            # insert the bounding box of person j into the "shapes" list
            bbox = {"label": "person", 'points': [[float(pred[j][0].item()), float(pred[j][1].item())],
                                                  [float(pred[j][2].item()), float(pred[j][3].item())]],
                    "group_id": (j + 1),
                    "shape_type": "rectangle", "flags": {}}
            data['shapes'].append(bbox)
            data.update({"imagePath": imgName})
            data.update({"imageData": base64_string})
            # data.update({"imageData": str(base64.b64encode(open(img_path, "rb").read()))})
            data.update({"imageHeight": orig_img.shape[0]})
            data.update({"imageWidth": orig_img.shape[1]})
            for i in range(17):
                # only keep keypoints whose confidence is above 0.5
                if float(pred_kpts[j][i][2].item()) > 0.5:
                    # print(float(pred_kpts[:, i, 0]), float(pred_kpts[:, i, 1]))
                    keypoints = {'label': str(i),
                                 'points': [[float(pred_kpts[j][i][0].item()), float(pred_kpts[j][i][1].item())]],
                                 'group_id': (j + 1), 'shape_type': 'point', 'flags': {}}
                    # keypoints = {'label': i, 'points': 0, 'group_id': 1, 'shape_type': 'point', 'flags': {}}
                    # print(keypoints)
                    data['shapes'].append(keypoints)
            # write the updated data back to the JSON file
            with open(savePath, 'w') as f:
                json.dump(data, f, indent=4)
        # print(pred)
        results.append(
            Results(orig_img, path=img_path, names=self.model.names, boxes=pred[:, :6], keypoints=pred_kpts))
    return results
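With the modified postprocess in place, running pose prediction as usual is enough to produce one JSON file per image, provided the edited ultralytics source tree is the one being imported. A minimal usage sketch through the Python API, where the weights file and source directory are placeholders for your own paths:

from ultralytics import YOLO

# placeholder weights and image directory; any YOLOv8 pose checkpoint will do
model = YOLO("yolov8n-pose.pt")
model.predict(source="/path/to/images", conf=0.25)

# every processed image now has a same-named .json file in the directory hard-coded
# in savePath above, ready to be reviewed and corrected in labelme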
