
Training Mask R-CNN on Your Own Data


Before training Mask R-CNN on your own data, you should be familiar with the Mask R-CNN annotation tool and have run the Mask R-CNN demo successfully. The two posts below cover the annotation tool and how to run the demo, respectively.

Mask RCNN annotation tool

Installing Mask RCNN with Keras and TensorFlow and running the demo

Once you know the annotation tool and have the demo working, you can train your own Mask RCNN model for your specific problem. Since the dataset cannot be made public, the following is a brief walkthrough of the whole training process.

1  Preparing the Dataset

Training a Mask RCNN model only needs data organized into 4 folders: the original images, the json files, the folders generated from the json files, and the label.png masks.

Note: the label.png inside each folder generated by labelme is a 16-bit image, but OpenCV runs into errors when reading 16-bit images, so we need a script to convert the 16-bit label.png files to 8-bit. The script can be downloaded here: labelme 16-bit to 8-bit download.
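Below is a minimal sketch of what such a conversion can look like, assuming the folder layout used later in this post (labelme_json/<name>_json/label.png as input, cv2_mask/<name>.png as output); the downloadable script linked above is the authoritative version.

# -*- coding: utf-8 -*-
# Sketch: convert 16-bit label.png masks to 8-bit (paths are assumptions, adjust as needed).
import os
import numpy as np
from PIL import Image

src_dir = "train_data/labelme_json"  # folders produced by labelme, one <name>_json per image
dst_dir = "train_data/cv2_mask"      # output folder for the 8-bit masks
os.makedirs(dst_dir, exist_ok=True)

for sub in os.listdir(src_dir):
    label_path = os.path.join(src_dir, sub, "label.png")
    if not os.path.isfile(label_path):
        continue
    # Pixel values are small instance indices, so only the bit depth needs to change.
    label = np.asarray(Image.open(label_path))
    Image.fromarray(label.astype(np.uint8)).save(
        os.path.join(dst_dir, sub.replace("_json", "") + ".png"))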

Folder layout:

The contents of the 4 folders are:

- pic: the original images
- json: the .json annotation files exported by labelme
- labelme_json: the <name>_json folders generated from each .json file (each containing img.png, label.png, info.yaml, etc.)
- cv2_mask: the 8-bit label.png masks, one per image, renamed to <name>.png
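Given the layout above, a quick sanity check (a sketch, assuming the train_data/ root directory and the folder names used in this post) can confirm that every image has a matching mask and labelme output before training starts:

import os

root = "train_data/"
for name in os.listdir(os.path.join(root, "pic")):
    stem = name.split(".")[0]
    mask = os.path.join(root, "cv2_mask", stem + ".png")
    info_yaml = os.path.join(root, "labelme_json", stem + "_json", "info.yaml")
    if not (os.path.isfile(mask) and os.path.isfile(info_yaml)):
        print("missing annotation files for", name)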

2  Training the Model

Once the dataset is ready, we can train our model.

Source code for training:

# -*- coding: utf-8 -*-
import os
import sys
import random
import math
import re
import time
import numpy as np
import cv2
import matplotlib
import matplotlib.pyplot as plt
import tensorflow as tf
from mrcnn.config import Config
#import utils
from mrcnn import model as modellib, utils
from mrcnn import visualize
import yaml
from mrcnn.model import log
from PIL import Image

#os.environ["CUDA_VISIBLE_DEVICES"] = "0"

# Root directory of the project
ROOT_DIR = os.getcwd()  # absolute path of the directory this .py file runs from
#ROOT_DIR = os.path.abspath("../")

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

iter_num = 0

# Local path to trained weights file
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)


class ShapesConfig(Config):
    """Configuration for training on the toy shapes dataset.
    Derives from the base Config class and overrides values specific
    to the toy shapes dataset.
    """
    # Give the configuration a recognizable name
    NAME = "shapes"

    # Train on 1 GPU and 2 images per GPU. We can put multiple images on each
    # GPU because the images are small. Batch size is 2 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 2

    # Number of classes (including background)
    NUM_CLASSES = 1 + 1  # background + 1 class (package)

    # Use small images for faster training. Set the limits of the small side
    # and the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 320
    IMAGE_MAX_DIM = 384

    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6)  # anchor side in pixels

    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 100

    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100

    # Use small validation steps since the epoch is small
    VALIDATION_STEPS = 50


config = ShapesConfig()
config.display()


class DrugDataset(utils.Dataset):
    # Get the number of instances (objects) in the image
    def get_obj_index(self, image):
        n = np.max(image)
        return n

    # Parse the yaml file produced by labelme to get the instance label
    # corresponding to each layer of the mask
    def from_yaml_get_class(self, image_id):
        info = self.image_info[image_id]
        with open(info['yaml_path']) as f:
            temp = yaml.load(f.read(), Loader=yaml.FullLoader)  # explicit Loader required by newer PyYAML
            labels = temp['label_names']
            del labels[0]
        return labels

    # Rewrite draw_mask
    def draw_mask(self, num_obj, mask, image, image_id):
        info = self.image_info[image_id]
        for index in range(num_obj):
            for i in range(info['width']):
                for j in range(info['height']):
                    at_pixel = image.getpixel((i, j))
                    if at_pixel == index + 1:
                        mask[j, i, index] = 1
        return mask

    # Rewrite load_shapes to register our own classes (add as many as needed)
    # and to add path, mask_path and yaml_path to self.image_info
    def load_shapes(self, count, img_floder, mask_floder, imglist, dataset_root_path):
        """Generate the requested number of synthetic images.
        count: number of images to generate.
        height, width: the size of the generated images.
        """
        # Add classes; more classes can be registered the same way
        self.add_class("shapes", 1, "package")
        for i in range(count):
            # Get the image width and height
            filestr = imglist[i].split(".")[0]
            mask_path = mask_floder + "/" + filestr + ".png"
            yaml_path = dataset_root_path + "labelme_json/" + filestr + "_json/info.yaml"
            print(dataset_root_path + "labelme_json/" + filestr + "_json/img.png")
            cv_img = cv2.imread(dataset_root_path + "labelme_json/" + filestr + "_json/img.png")
            self.add_image("shapes", image_id=i, path=img_floder + "/" + imglist[i],
                           width=cv_img.shape[1], height=cv_img.shape[0],
                           mask_path=mask_path, yaml_path=yaml_path)

    # Rewrite load_mask
    def load_mask(self, image_id):
        """Generate instance masks for shapes of the given image ID.
        """
        global iter_num
        print("image_id", image_id)
        info = self.image_info[image_id]
        count = 1  # number of classes (excluding background)
        img = Image.open(info['mask_path'])
        num_obj = self.get_obj_index(img)
        mask = np.zeros([info['height'], info['width'], num_obj], dtype=np.uint8)
        mask = self.draw_mask(num_obj, mask, img, image_id)
        occlusion = np.logical_not(mask[:, :, -1]).astype(np.uint8)
        for i in range(count - 2, -1, -1):
            mask[:, :, i] = mask[:, :, i] * occlusion
            occlusion = np.logical_and(occlusion, np.logical_not(mask[:, :, i]))
        # Map the labelme label names to the classes registered in load_shapes.
        # Only "package" is registered above, so only that name is matched here;
        # add more branches if you register more classes.
        labels = self.from_yaml_get_class(image_id)
        labels_form = []
        for i in range(len(labels)):
            if labels[i].find("package") != -1:
                labels_form.append("package")
        class_ids = np.array([self.class_names.index(s) for s in labels_form])
        return mask, class_ids.astype(np.int32)


def get_ax(rows=1, cols=1, size=8):
    """Return a Matplotlib Axes array to be used in
    all visualizations in the notebook. Provide a
    central point to control graph sizes.
    Change the default size attribute to control the size
    of rendered images.
    """
    _, ax = plt.subplots(rows, cols, figsize=(size * cols, size * rows))
    return ax


# Basic settings
dataset_root_path = "train_data/"  # root directory that contains the 4 folders
img_floder = dataset_root_path + "pic"
mask_floder = dataset_root_path + "cv2_mask"
#yaml_floder = dataset_root_path
imglist = os.listdir(img_floder)
count = len(imglist)

# Prepare the train and val datasets
dataset_train = DrugDataset()
dataset_train.load_shapes(count, img_floder, mask_floder, imglist, dataset_root_path)
dataset_train.prepare()

dataset_val = DrugDataset()
dataset_val.load_shapes(7, img_floder, mask_floder, imglist, dataset_root_path)
dataset_val.prepare()

# Load and display random samples
#image_ids = np.random.choice(dataset_train.image_ids, 4)
#for image_id in image_ids:
#    image = dataset_train.load_image(image_id)
#    mask, class_ids = dataset_train.load_mask(image_id)
#    visualize.display_top_masks(image, mask, class_ids, dataset_train.class_names)

# Create model in training mode
model = modellib.MaskRCNN(mode="training", config=config,
                          model_dir=MODEL_DIR)

# Which weights to start with?
init_with = "coco"  # imagenet, coco, or last

if init_with == "imagenet":
    model.load_weights(model.get_imagenet_weights(), by_name=True)
elif init_with == "coco":
    # Load weights trained on MS COCO, but skip layers that
    # are different due to the different number of classes
    # See README for instructions to download the COCO weights
    model.load_weights(COCO_MODEL_PATH, by_name=True,
                       exclude=["mrcnn_class_logits", "mrcnn_bbox_fc",
                                "mrcnn_bbox", "mrcnn_mask"])
elif init_with == "last":
    # Load the last model you trained and continue training
    model.load_weights(model.find_last()[1], by_name=True)

# Train the head branches
# Passing layers="heads" freezes all layers except the head
# layers. You can also pass a regular expression to select
# which layers to train by name pattern.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE,
            epochs=20,
            layers='heads')

# Fine tune all layers
# Passing layers="all" trains all layers. You can also
# pass a regular expression to select which layers to
# train by name pattern.
model.train(dataset_train, dataset_val,
            learning_rate=config.LEARNING_RATE / 10,
            epochs=40,
            layers="all")
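After the two training stages (heads only, then all layers), checkpoints are written to the logs/ directory automatically. If you also want a standalone weights file, a short addition such as the following can be appended at the end of the script; this is a sketch that assumes the matterport implementation, where the underlying Keras model is exposed as model.keras_model, and the output file name is only an example.

# Optional: save the final weights to a single file for later testing.
model_path = os.path.join(MODEL_DIR, "mask_rcnn_shapes.h5")
model.keras_model.save_weights(model_path)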

Training results (figure omitted):

3  Testing the Model

After training, we can test the model to see how well it performs. Since it was trained for relatively few iterations, the results are only moderate.

The test code is as follows:

# -*- coding: utf-8 -*-
import os
import sys
import random
import math
import numpy as np
import skimage.io
import matplotlib
import matplotlib.pyplot as plt
import cv2
import time
from mrcnn.config import Config
from datetime import datetime

# Root directory of the project
ROOT_DIR = os.getcwd()

# Import Mask RCNN
sys.path.append(ROOT_DIR)  # To find local version of the library
from mrcnn import utils
import mrcnn.model as modellib
from mrcnn import visualize

# Import COCO config
sys.path.append(os.path.join(ROOT_DIR, "samples/coco/"))  # To find local version
from samples.coco import coco

# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")

# Local path to the weights file to test
COCO_MODEL_PATH = os.path.join(MODEL_DIR, "mask_rcnn_coco.h5")
# Download COCO trained weights from Releases if needed
if not os.path.exists(COCO_MODEL_PATH):
    utils.download_trained_weights(COCO_MODEL_PATH)
print("cuiwei***********************")

# Directory of images to run detection on
IMAGE_DIR = os.path.join(ROOT_DIR, "images")


class ShapesConfig(Config):
    """Configuration for training on the toy shapes dataset.
    Derives from the base Config class and overrides values specific
    to the toy shapes dataset.
    """
    # Give the configuration a recognizable name
    NAME = "shapes"

    # Train on 1 GPU and 1 image per GPU. Batch size is 1 (GPUs * images/GPU).
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1

    # Number of classes (including background); must match the training configuration
    NUM_CLASSES = 1 + 1  # background + 1 class (package)

    # Use small images for faster training. Set the limits of the small side
    # and the large side, and that determines the image shape.
    IMAGE_MIN_DIM = 320
    IMAGE_MAX_DIM = 384

    # Use smaller anchors because our image and objects are small
    RPN_ANCHOR_SCALES = (8 * 6, 16 * 6, 32 * 6, 64 * 6, 128 * 6)  # anchor side in pixels

    # Reduce training ROIs per image because the images are small and have
    # few objects. Aim to allow ROI sampling to pick 33% positive ROIs.
    TRAIN_ROIS_PER_IMAGE = 100

    # Use a small epoch since the data is simple
    STEPS_PER_EPOCH = 100

    # Use small validation steps since the epoch is small
    VALIDATION_STEPS = 50


#import train_tongue
#class InferenceConfig(coco.CocoConfig):
class InferenceConfig(ShapesConfig):
    # Set batch size to 1 since we'll be running inference on
    # one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
    GPU_COUNT = 1
    IMAGES_PER_GPU = 1


config = InferenceConfig()

# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)

# Load the trained weights (the path above should point to the weights you want to test)
model.load_weights(COCO_MODEL_PATH, by_name=True)

# Class names: the index of a class in the list is its ID.
class_names = ['BG', 'package']

# Load a random image from the images folder
file_names = next(os.walk(IMAGE_DIR))[2]
image = skimage.io.imread(os.path.join(IMAGE_DIR, random.choice(file_names)))

a = datetime.now()
# Run detection
results = model.detect([image], verbose=1)
b = datetime.now()
print("time", (b - a).seconds)

# Visualize results
r = results[0]
visualize.display_instances(image, r['rois'], r['masks'], r['class_ids'],
                            class_names, r['scores'])
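Note that the script above loads whatever file COCO_MODEL_PATH points to. To test the model you just trained, point load_weights at the checkpoint written to logs/ instead; a minimal sketch, assuming the same Mask R-CNN release used in the training script (where find_last() returns a tuple):

# Load the most recent checkpoint produced by training instead of a fixed path.
model_path = model.find_last()[1]
model.load_weights(model_path, by_name=True)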

Reference blogs:

https://blog.csdn.net/qq_29462849/article/details/81037343

https://blog.csdn.net/l297969586/article/details/79140840/
