赞
踩
本文主要阐述模型评估相关内容以及实际项目中遇到的问题和解决办法。
模型性能评估:
当我们完成网络模型训练以后,便希望能看一下模型的分割性能如何。模型性能评估是一个过程,需要对多个输入/输出进行汇总。Detectron2给我们提供了一些使用标准数据集特定的API(例如COCO,LVIS)的接口,以便于我们来计算性能指标。只要我们自己的数据集满足Detectron2要求的DatasetEvaluator数据集格式,那么就可以利用其提供的接口来评估我们训练好的模型性能。Detectron2提供了两个接口供我们选择:
(1)直接使用模型
(2)DatasetEvaluator
本项目使用的是DatasetEvaluator接口。具体调用步骤如下:
# Register the test split as a COCO-format dataset so Detectron2 can load it.
# The annotation path is split across adjacent string literals (implicit
# concatenation): "../datasets/project/test_coco/test_coco.json".
register_coco_instances("test", {}, "../datasets/"
"project/"
"test_coco/"
"test_coco.json", ".")
# Force creation of the metadata entry for the newly registered dataset.
MetadataCatalog.get("test")
# NOTE(review): `cfg` and `trainer` are not defined in this snippet — they
# must be created earlier (e.g. cfg = get_cfg(); trainer = DefaultTrainer(cfg)).
cfg.merge_from_file("../configs/project/mask_rcnn_R_101_FPN_3x.yaml")
cfg.DATASETS.TEST = ("test",)
# Evaluate the final checkpoint produced by training.
cfg.MODEL.WEIGHTS = os.path.join(cfg.OUTPUT_DIR, "final_model.pth")
# Compute both box AP ("bbox") and mask AP ("segm") with the COCO protocol.
evaluator = COCOEvaluator("test", ("bbox", "segm"), False, output_dir="../output/")
val_loader = build_detection_test_loader(cfg, "test")
# Run inference over the whole test set and print the AP summary table.
print(inference_on_dataset(trainer.model, val_loader, evaluator))
按照上述步骤进行模型评估,发现有些自定义的类别的AP=0。画出模型训练过程中各参数的变化曲线如下图所示,可以发现,各loss值随着训练迭代次数的增加而减小,cls_accuracy随着训练迭代次数的增加而逐渐趋近于1。
那么是什么原因导致有些类别的AP=0呢?经检查发现,已训练好的模型中自定义的类别id与测试集中的类别id并没有一一对应。在我这篇文章中有提到,Detectron2提供了生成coco数据集格式的文件,但是,在生成测试集的.json文件时,并不能保证与训练集中类别的id一一对应上。所以,我们这里将对其源码进行更改,完整代码如下:
import time
import os
import json
import PIL.Image
import PIL.ImageDraw
import numpy as np
from labelme2coco.utils import create_dir, list_jsons_recursively
from labelme2coco.image_utils import read_image_shape_as_dict


# Convert a folder of labelme annotations into a single COCO-format json.
class labelme2coco(object):
    """Convert labelme annotations to a COCO-format dataset json.

    The category ids are hard-coded in :meth:`category` so that the test-set
    json uses exactly the same id/name mapping as the training-set json;
    otherwise COCO evaluation reports AP=0 for the mismatched classes.
    """

    def __init__(self, labelme_folder='', save_json_path='./new.json'):
        """
        Args:
            labelme_folder: folder that contains labelme annotations and image files
            save_json_path: path for coco json to be saved
        """
        self.save_json_path = save_json_path
        self.images = []
        self.categories = []
        self.annotations = []
        self.label = []          # labels seen so far, in discovery order
        self.annID = 1           # running annotation id (1-based)
        self.height = 0
        self.width = 0

        # create save dir
        save_json_dir = os.path.dirname(save_json_path)
        create_dir(save_json_dir)

        # get json list
        _, labelme_json = list_jsons_recursively(labelme_folder)
        self.labelme_json = labelme_json

        # Conversion runs as a side effect of construction.
        self.save_json()

    def data_transfer(self):
        """Walk every labelme json, accumulating images/categories/annotations."""
        for num, json_path in enumerate(self.labelme_json):
            with open(json_path, 'r') as fp:
                data = json.load(fp)
                self.images.append(self.image(data, num, json_path))
                for shapes in data['shapes']:
                    label = shapes['label']
                    if label not in self.label:
                        self.categories.append(self.category(label))
                        self.label.append(label)
                    points = shapes['points']
                    self.annotations.append(self.annotation(points, label, num))
                    self.annID += 1

    def image(self, data, num, json_path):
        """Build the COCO "image" record; caches height/width on self for
        later use by getbbox/annotation."""
        image = {}
        # The image file is assumed to sit next to the json, with the
        # extension recorded in the labelme "imagePath" field.
        _, img_extension = os.path.splitext(data["imagePath"])
        image_path = json_path.replace(".json", img_extension)
        img_shape = read_image_shape_as_dict(image_path)
        height, width = img_shape['height'], img_shape['width']

        image['height'] = height
        image['width'] = width
        image['id'] = int(num + 1)
        image['file_name'] = image_path

        self.height = height
        self.width = width

        return image

    def category(self, label):
        """Build the COCO "category" record with a FIXED id per label.

        The original upstream code assigned ids in discovery order:
            category['id'] = int(len(self.label) + 1)
            category['name'] = label
        That ordering depends on which label happens to appear first in the
        test set, so the ids may not match the training json — which is what
        made some classes evaluate to AP=0. Hard-code the mapping instead
        ("****" is a placeholder for your own class names; keep the id/name
        pairs identical to the training-set json's "categories").
        """
        category = {}
        category['supercategory'] = label
        if label == "****":
            category['id'] = 1
            category['name'] = label
        elif label == "****":
            category['id'] = 2
            category['name'] = label
        elif label == "****":
            category['id'] = 3
            category['name'] = label
        else:
            # Fail loudly: the original code returned a dict without
            # 'id'/'name' here, which crashed later with a KeyError
            # inside getcatid.
            raise ValueError(
                "label %r has no hard-coded category id; "
                "add it to labelme2coco.category" % (label,))
        return category

    def annotation(self, points, label, num):
        """Build the COCO "annotation" record for one polygon."""
        annotation = {}
        annotation['iscrowd'] = 0
        annotation['image_id'] = int(num + 1)
        annotation['bbox'] = list(map(float, self.getbbox(points)))
        # Coarse segmentation: the raw polygon flattened to [x1,y1,x2,y2,...].
        annotation['segmentation'] = [np.asarray(points).flatten().tolist()]
        annotation['category_id'] = self.getcatid(label)
        annotation['id'] = int(self.annID)
        # NOTE: this is the full image area, not the instance area; the
        # upstream code states the area is not used for detection.
        annotation['area'] = self.height * self.width
        return annotation

    def getcatid(self, label):
        """Return the category id registered for `label`, or -1 if unknown."""
        for categorie in self.categories:
            if label == categorie['name']:
                return categorie['id']
        return -1

    def getbbox(self, points):
        """Rasterize the polygon and return its bounding box [x, y, w, h]."""
        polygons = points
        mask = self.polygons_to_mask([self.height, self.width], polygons)
        return self.mask2box(mask)

    def mask2box(self, mask):
        """Return the tight [x1, y1, w, h] box around the nonzero mask pixels."""
        index = np.argwhere(mask == 1)
        rows = index[:, 0]
        clos = index[:, 1]
        left_top_r = np.min(rows)  # y
        left_top_c = np.min(clos)  # x
        right_bottom_r = np.max(rows)
        right_bottom_c = np.max(clos)
        # [x1, y1, w, h] — coco box format
        return [left_top_c, left_top_r,
                right_bottom_c - left_top_c,
                right_bottom_r - left_top_r]

    def polygons_to_mask(self, img_shape, polygons):
        """Rasterize a polygon (list of [x, y] points) into a boolean mask."""
        mask = np.zeros(img_shape, dtype=np.uint8)
        mask = PIL.Image.fromarray(mask)
        xy = list(map(tuple, polygons))
        PIL.ImageDraw.Draw(mask).polygon(xy=xy, outline=1, fill=1)
        mask = np.array(mask, dtype=bool)
        return mask

    def data2coco(self):
        """Assemble the final COCO dict from the accumulated pieces."""
        data_coco = {}
        data_coco['images'] = self.images
        data_coco['categories'] = self.categories
        data_coco['annotations'] = self.annotations
        return data_coco

    def save_json(self):
        """Run the conversion and write the COCO json to save_json_path."""
        self.data_transfer()
        self.data_coco = self.data2coco()
        # Use a context manager so the file handle is closed — the original
        # passed a bare open() straight into json.dump and leaked the handle.
        with open(self.save_json_path, 'w', encoding='utf-8') as f:
            json.dump(self.data_coco, f, indent=4,
                      separators=(',', ': '), cls=MyEncoder)


# Type check when saving json files: numpy scalars/arrays are not json
# serializable by default, so coerce them to plain Python types.
class MyEncoder(json.JSONEncoder):
    def default(self, obj):
        if isinstance(obj, np.integer):
            return int(obj)
        elif isinstance(obj, np.floating):
            return float(obj)
        elif isinstance(obj, np.ndarray):
            return obj.tolist()
        else:
            return super(MyEncoder, self).default(obj)


if __name__ == "__main__":
    labelme_folder = "../datasets/project/test/"  # test-set folder
    save_json_path = "../datasets/project/test_coco/test_coco.json"  # output .json path
    labelme2coco(labelme_folder, save_json_path)
评估结果如下图所示:
bbox:
从图中可以看出:
(1) AP@[IoU=0.5:0.95]表示在IoU阈值从0.5到0.95(步长0.05)上分别计算AP再取平均,其值为0.676;
(2) AP@[IoU=0.5]表示IoU阈值取0.5时的AP,其值为0.803;
(3) AP@[IoU=0.75]表示IoU阈值取0.75时的AP,其值为0.737;
segm:
从图中可以看出:
(1) AP@[IoU=0.5:0.95]表示在IoU阈值从0.5到0.95(步长0.05)上分别计算AP再取平均,其值为0.453;
(2) AP@[IoU=0.5]表示IoU阈值取0.5时的AP,其值为0.700;
(3) AP@[IoU=0.75]表示IoU阈值取0.75时的AP,其值为0.445;
结果还是不错的。
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。