
Running Your Own Point Cloud Data: Detailed Code and Annotations


As several readers have requested, here is the full, modified point cloud part-segmentation code, which you can run on your own point cloud data.
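The one change that drives everything else is the category-to-part-label bookkeeping. Below is a minimal sketch of how seg_classes and the derived seg_label_to_cat lookup behave; the category names and label ids mirror the modified code that follows, so substitute your own:

import torch

# Minimal sketch of the label bookkeeping used throughout the scripts below.
seg_classes = {'Airplane': [0, 1], 'Mug': [2, 3]}   # category -> its global part labels

seg_label_to_cat = {}                                # part label -> category name
for cat in seg_classes:
    for label in seg_classes[cat]:
        seg_label_to_cat[label] = cat

print(seg_label_to_cat)  # {0: 'Airplane', 1: 'Airplane', 2: 'Mug', 3: 'Mug'}

# num_classes is the number of object categories, num_part the total part-label count;
# both scripts below hard-code these and they must agree with seg_classes.
num_classes = len(seg_classes)                        # 2
num_part = sum(len(v) for v in seg_classes.values())  # 4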

First, the training code (a modified train_partseg.py):

import argparse
import os
from data_utils.ShapeNetDataLoader import PartNormalDataset
import torch
import datetime
import logging
from pathlib import Path
import sys
import importlib
import shutil
from tqdm import tqdm
import provider
import numpy as np

"""
Command-line arguments used for training:
--model pointnet2_part_seg_msg
--normal
--log_dir pointnet2_part_seg_msg
"""

BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))

# The original 16-category / 50-part ShapeNet mapping, kept for reference:
# seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43], 'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
seg_classes = {'Airplane': [0, 1], 'Mug': [2, 3]}
seg_label_to_cat = {}  # {0: 'Airplane', 1: 'Airplane', 2: 'Mug', 3: 'Mug'}
for cat in seg_classes.keys():
    for label in seg_classes[cat]:
        seg_label_to_cat[label] = cat


def to_categorical(y, num_classes):
    """ 1-hot encodes a tensor """
    new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
    if (y.is_cuda):
        return new_y.cuda()
    return new_y


def parse_args():
    parser = argparse.ArgumentParser('Model')
    parser.add_argument('--model', type=str, default='pointnet2_part_seg_msg', help='model name [default: pointnet2_part_seg_msg]')
    parser.add_argument('--batch_size', type=int, default=4, help='Batch Size during training [default: 16]')
    parser.add_argument('--epoch', default=251, type=int, help='Epoch to run [default: 251]')
    parser.add_argument('--learning_rate', default=0.001, type=float, help='Initial learning rate [default: 0.001]')
    parser.add_argument('--gpu', type=str, default='0', help='GPU to use [default: GPU 0]')
    parser.add_argument('--optimizer', type=str, default='Adam', help='Adam or SGD [default: Adam]')
    parser.add_argument('--log_dir', type=str, default=None, help='Log path [default: None]')
    parser.add_argument('--decay_rate', type=float, default=1e-4, help='weight decay [default: 1e-4]')
    parser.add_argument('--npoint', type=int, default=2048, help='Point Number [default: 2048]')
    parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]')
    parser.add_argument('--step_size', type=int, default=2, help='Decay step for lr decay [default: every 20 epochs]')
    parser.add_argument('--lr_decay', type=float, default=0.5, help='Decay rate for lr decay [default: 0.5]')
    return parser.parse_args()


def main(args):
    def log_string(str):
        logger.info(str)
        print(str)

    # '''HYPER PARAMETER'''
    # os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu

    '''CREATE DIR'''
    timestr = str(datetime.datetime.now().strftime('%Y-%m-%d_%H-%M'))
    experiment_dir = Path('./log/')
    experiment_dir.mkdir(exist_ok=True)
    experiment_dir = experiment_dir.joinpath('part_seg')
    experiment_dir.mkdir(exist_ok=True)
    if args.log_dir is None:
        experiment_dir = experiment_dir.joinpath(timestr)
    else:
        experiment_dir = experiment_dir.joinpath(args.log_dir)
    experiment_dir.mkdir(exist_ok=True)
    checkpoints_dir = experiment_dir.joinpath('checkpoints/')
    checkpoints_dir.mkdir(exist_ok=True)
    log_dir = experiment_dir.joinpath('logs/')
    log_dir.mkdir(exist_ok=True)

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/%s.txt' % (log_dir, args.model))
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'

    TRAIN_DATASET = PartNormalDataset(root=root, npoints=args.npoint, split='trainval', normal_channel=args.normal)
    trainDataLoader = torch.utils.data.DataLoader(TRAIN_DATASET, batch_size=args.batch_size, shuffle=True, num_workers=4)
    TEST_DATASET = PartNormalDataset(root=root, npoints=args.npoint, split='test', normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)
    log_string("The number of training data is: %d" % len(TRAIN_DATASET))
    log_string("The number of test data is: %d" % len(TEST_DATASET))
    num_classes = 2  # number of object categories in the custom dataset
    num_part = 4     # total number of part labels across all categories

    '''MODEL LOADING'''
    MODEL = importlib.import_module(args.model)
    shutil.copy('models/%s.py' % args.model, str(experiment_dir))
    shutil.copy('models/pointnet_util.py', str(experiment_dir))

    classifier = MODEL.get_model(num_part, normal_channel=args.normal).cuda()
    criterion = MODEL.get_loss().cuda()

    def weights_init(m):
        classname = m.__class__.__name__
        if classname.find('Conv2d') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)
        elif classname.find('Linear') != -1:
            torch.nn.init.xavier_normal_(m.weight.data)
            torch.nn.init.constant_(m.bias.data, 0.0)

    try:
        checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
        start_epoch = checkpoint['epoch']
        classifier.load_state_dict(checkpoint['model_state_dict'])
        log_string('Use pretrain model')
    except:
        log_string('No existing model, starting training from scratch...')
        start_epoch = 0
        classifier = classifier.apply(weights_init)

    if args.optimizer == 'Adam':
        optimizer = torch.optim.Adam(
            classifier.parameters(),
            lr=args.learning_rate,
            betas=(0.9, 0.999),
            eps=1e-08,
            weight_decay=args.decay_rate
        )
    else:
        optimizer = torch.optim.SGD(classifier.parameters(), lr=args.learning_rate, momentum=0.9)

    def bn_momentum_adjust(m, momentum):
        if isinstance(m, torch.nn.BatchNorm2d) or isinstance(m, torch.nn.BatchNorm1d):
            m.momentum = momentum

    LEARNING_RATE_CLIP = 1e-5
    MOMENTUM_ORIGINAL = 0.1
    MOMENTUM_DECCAY = 0.5
    MOMENTUM_DECCAY_STEP = args.step_size

    best_acc = 0
    global_epoch = 0
    best_class_avg_iou = 0
    best_instance_avg_iou = 0

    for epoch in range(start_epoch, args.epoch):
        log_string('Epoch %d (%d/%s):' % (global_epoch + 1, epoch + 1, args.epoch))
        '''Adjust learning rate and BN momentum'''
        lr = max(args.learning_rate * (args.lr_decay ** (epoch // args.step_size)), LEARNING_RATE_CLIP)
        log_string('Learning rate:%f' % lr)
        for param_group in optimizer.param_groups:
            param_group['lr'] = lr
        mean_correct = []
        momentum = MOMENTUM_ORIGINAL * (MOMENTUM_DECCAY ** (epoch // MOMENTUM_DECCAY_STEP))
        if momentum < 0.01:
            momentum = 0.01
        print('BN momentum updated to: %f' % momentum)
        classifier = classifier.apply(lambda x: bn_momentum_adjust(x, momentum))

        '''learning one epoch'''
        for i, data in tqdm(enumerate(trainDataLoader), total=len(trainDataLoader), smoothing=0.9):
            points, label, target = data
            points = points.data.numpy()
            points[:, :, 0:3] = provider.random_scale_point_cloud(points[:, :, 0:3])
            points[:, :, 0:3] = provider.shift_point_cloud(points[:, :, 0:3])
            points = torch.Tensor(points)
            points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
            points = points.transpose(2, 1)
            optimizer.zero_grad()
            classifier = classifier.train()
            seg_pred, trans_feat = classifier(points, to_categorical(label, num_classes))
            seg_pred = seg_pred.contiguous().view(-1, num_part)
            target = target.view(-1, 1)[:, 0]
            pred_choice = seg_pred.data.max(1)[1]
            correct = pred_choice.eq(target.data).cpu().sum()
            mean_correct.append(correct.item() / (args.batch_size * args.npoint))
            loss = criterion(seg_pred, target, trans_feat)
            loss.backward()
            optimizer.step()
        train_instance_acc = np.mean(mean_correct)
        log_string('Train accuracy is: %.5f' % train_instance_acc)

        with torch.no_grad():
            test_metrics = {}
            total_correct = 0
            total_seen = 0
            total_seen_class = [0 for _ in range(num_part)]
            total_correct_class = [0 for _ in range(num_part)]
            shape_ious = {cat: [] for cat in seg_classes.keys()}
            seg_label_to_cat = {}  # {0: 'Airplane', 1: 'Airplane', 2: 'Mug', 3: 'Mug'}
            for cat in seg_classes.keys():
                for label in seg_classes[cat]:
                    seg_label_to_cat[label] = cat

            for batch_id, (points, label, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
                cur_batch_size, NUM_POINT, _ = points.size()
                points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
                points = points.transpose(2, 1)
                classifier = classifier.eval()
                seg_pred, _ = classifier(points, to_categorical(label, num_classes))
                cur_pred_val = seg_pred.cpu().data.numpy()
                cur_pred_val_logits = cur_pred_val
                cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
                target = target.cpu().data.numpy()
                for i in range(cur_batch_size):
                    cat = seg_label_to_cat[target[i, 0]]
                    logits = cur_pred_val_logits[i, :, :]
                    cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]
                correct = np.sum(cur_pred_val == target)
                total_correct += correct
                total_seen += (cur_batch_size * NUM_POINT)

                for l in range(num_part):
                    total_seen_class[l] += np.sum(target == l)
                    total_correct_class[l] += (np.sum((cur_pred_val == l) & (target == l)))

                for i in range(cur_batch_size):
                    segp = cur_pred_val[i, :]
                    segl = target[i, :]
                    cat = seg_label_to_cat[segl[0]]
                    part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
                    for l in seg_classes[cat]:
                        if (np.sum(segl == l) == 0) and (
                                np.sum(segp == l) == 0):  # part is not present, no prediction as well
                            part_ious[l - seg_classes[cat][0]] = 1.0
                        else:
                            part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(
                                np.sum((segl == l) | (segp == l)))
                    shape_ious[cat].append(np.mean(part_ious))

            all_shape_ious = []
            for cat in shape_ious.keys():
                for iou in shape_ious[cat]:
                    all_shape_ious.append(iou)
                shape_ious[cat] = np.mean(shape_ious[cat])
            mean_shape_ious = np.mean(list(shape_ious.values()))
            test_metrics['accuracy'] = total_correct / float(total_seen)
            test_metrics['class_avg_accuracy'] = np.mean(
                np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))
            for cat in sorted(shape_ious.keys()):
                log_string('eval mIoU of %s %f' % (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
            test_metrics['class_avg_iou'] = mean_shape_ious
            test_metrics['instance_avg_iou'] = np.mean(all_shape_ious)

        log_string('Epoch %d test Accuracy: %f Class avg mIOU: %f Instance avg mIOU: %f' % (
            epoch + 1, test_metrics['accuracy'], test_metrics['class_avg_iou'], test_metrics['instance_avg_iou']))
        if (test_metrics['instance_avg_iou'] >= best_instance_avg_iou):
            logger.info('Save model...')
            savepath = str(checkpoints_dir) + '/best_model.pth'
            log_string('Saving at %s' % savepath)
            state = {
                'epoch': epoch,
                'train_acc': train_instance_acc,
                'test_acc': test_metrics['accuracy'],
                'class_avg_iou': test_metrics['class_avg_iou'],
                'instance_avg_iou': test_metrics['instance_avg_iou'],
                'model_state_dict': classifier.state_dict(),
                'optimizer_state_dict': optimizer.state_dict(),
            }
            torch.save(state, savepath)
            log_string('Saving model....')

        if test_metrics['accuracy'] > best_acc:
            best_acc = test_metrics['accuracy']
        if test_metrics['class_avg_iou'] > best_class_avg_iou:
            best_class_avg_iou = test_metrics['class_avg_iou']
        if test_metrics['instance_avg_iou'] > best_instance_avg_iou:
            best_instance_avg_iou = test_metrics['instance_avg_iou']
        log_string('Best accuracy is: %.5f' % best_acc)
        log_string('Best class avg mIOU is: %.5f' % best_class_avg_iou)
        log_string('Best instance avg mIOU is: %.5f' % best_instance_avg_iou)
        global_epoch += 1


if __name__ == '__main__':
    args = parse_args()
    main(args)
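To train with the settings listed in the docstring at the top of the script, run: python train_partseg.py --model pointnet2_part_seg_msg --normal --log_dir pointnet2_part_seg_msg. The per-shape IoU rule in the evaluation loop is worth sanity-checking in isolation; here is a small self-contained sketch with synthetic labels (the values are made up purely for illustration):

import numpy as np

# Sketch of the per-shape part-IoU rule from the evaluation loop above,
# applied to one hypothetical 8-point 'Mug' (parts 2 and 3).
seg_classes = {'Airplane': [0, 1], 'Mug': [2, 3]}
segl = np.array([2, 2, 2, 2, 3, 3, 3, 3])  # ground-truth part labels
segp = np.array([2, 2, 2, 3, 3, 3, 3, 3])  # predicted part labels
cat = 'Mug'

part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
for l in seg_classes[cat]:
    if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
        part_ious[l - seg_classes[cat][0]] = 1.0  # part absent and never predicted: counted as perfect
    else:
        part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(
            np.sum((segl == l) | (segp == l)))
print(part_ious, np.mean(part_ious))  # [0.75, 0.8] -> shape IoU 0.775

Averaging these shape IoUs per category gives the class avg mIoU; averaging them over all shapes regardless of category gives the instance avg mIoU that the training loop uses to pick the best checkpoint.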

Next, the test code (a modified test_partseg.py):

import argparse
import os
from data_utils.ShapeNetDataLoader import PartNormalDataset
import torch
import logging
import sys
import importlib
from tqdm import tqdm
import numpy as np

# My machine does not support CUDA, so my local copy removed every .cuda() call;
# this version adds them all back.
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(os.path.join(ROOT_DIR, 'models'))

seg_classes = {'Airplane': [0, 1], 'Mug': [2, 3]}  # here we segment two categories into four parts
seg_label_to_cat = {}  # {0: 'Airplane', 1: 'Airplane', 2: 'Mug', 3: 'Mug'}
for cat in seg_classes.keys():
    for label in seg_classes[cat]:
        seg_label_to_cat[label] = cat


def to_categorical(y, num_classes):
    """ 1-hot encodes a tensor """
    new_y = torch.eye(num_classes)[y.cpu().data.numpy(),]
    if (y.is_cuda):
        return new_y.cuda()
    return new_y


def parse_args():
    '''PARAMETERS'''
    parser = argparse.ArgumentParser('PointNet')
    # The output loop below is easiest to handle when the whole test set fits in one batch,
    # so batch_size is set to 250 (any value larger than the 236 test samples works).
    # I have verified this runs, just slowly; with a much larger test set it would not be practical.
    parser.add_argument('--batch_size', type=int, default=250, help='batch size in testing [default: 24]')
    parser.add_argument('--gpu', type=str, default='0', help='specify gpu device [default: 0]')
    parser.add_argument('--num_point', type=int, default=2048, help='Point Number [default: 2048]')
    parser.add_argument('--log_dir', type=str, default='pointnet2_part_seg_ssg', help='Experiment root')
    parser.add_argument('--normal', action='store_true', default=False, help='Whether to use normal information [default: False]')
    parser.add_argument('--num_votes', type=int, default=3, help='Aggregate segmentation scores with voting [default: 3]')
    return parser.parse_args()


def main(args):
    xxxxxx = 0  # counts how many points get written in total; debugging only

    def log_string(str):
        logger.info(str)
        print(str)

    '''HYPER PARAMETER'''
    os.environ["CUDA_VISIBLE_DEVICES"] = args.gpu
    experiment_dir = 'log/part_seg/' + args.log_dir

    '''LOG'''
    args = parse_args()
    logger = logging.getLogger("Model")
    logger.setLevel(logging.INFO)
    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
    file_handler = logging.FileHandler('%s/eval.txt' % experiment_dir)
    file_handler.setLevel(logging.INFO)
    file_handler.setFormatter(formatter)
    logger.addHandler(file_handler)
    log_string('PARAMETER ...')
    log_string(args)

    root = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/'
    TEST_DATASET = PartNormalDataset(root=root, npoints=args.num_point, split='test', normal_channel=args.normal)
    testDataLoader = torch.utils.data.DataLoader(TEST_DATASET, batch_size=args.batch_size, shuffle=False, num_workers=4)
    log_string("The number of test data is: %d" % len(TEST_DATASET))
    num_classes = 2  # adjust these two values to your own dataset
    num_part = 4

    '''MODEL LOADING'''
    model_name = os.listdir(experiment_dir + '/logs')[0].split('.')[0]
    MODEL = importlib.import_module(model_name)
    classifier = MODEL.get_model(num_part, normal_channel=args.normal).cuda()
    checkpoint = torch.load(str(experiment_dir) + '/checkpoints/best_model.pth')
    classifier.load_state_dict(checkpoint['model_state_dict'])

    with torch.no_grad():
        test_metrics = {}
        total_correct = 0
        total_seen = 0
        total_seen_class = [0 for _ in range(num_part)]
        total_correct_class = [0 for _ in range(num_part)]
        shape_ious = {cat: [] for cat in seg_classes.keys()}
        seg_label_to_cat = {}  # {0: 'Airplane', 1: 'Airplane', 2: 'Mug', 3: 'Mug'}
        for cat in seg_classes.keys():
            for label in seg_classes[cat]:
                seg_label_to_cat[label] = cat

        for batch_id, (points, label, target) in tqdm(enumerate(testDataLoader), total=len(testDataLoader), smoothing=0.9):
            cur_batch_size, NUM_POINT, _ = points.size()
            points, label, target = points.float().cuda(), label.long().cuda(), target.long().cuda()
            points = points.transpose(2, 1)
            classifier = classifier.eval()
            vote_pool = torch.zeros(target.size()[0], target.size()[1], num_part).cuda()
            for _ in range(args.num_votes):
                seg_pred, _ = classifier(points, to_categorical(label, num_classes))
                vote_pool += seg_pred
            seg_pred = vote_pool / args.num_votes
            cur_pred_val = seg_pred.cpu().data.numpy()
            cur_pred_val_logits = cur_pred_val
            cur_pred_val = np.zeros((cur_batch_size, NUM_POINT)).astype(np.int32)
            target = target.cpu().data.numpy()
            points1 = points.transpose(2, 1).cpu().numpy()  # move back to host before indexing with numpy
            for i in range(cur_batch_size):
                cat = seg_label_to_cat[target[i, 0]]
                logits = cur_pred_val_logits[i, :, :]
                cur_pred_val[i, :] = np.argmax(logits[:, seg_classes[cat]], 1) + seg_classes[cat][0]
                # From here on, the coordinates of the four predicted part labels are written out
                # one label at a time. The txt files are created automatically, so there is no need
                # to create empty files first, and each file's label is visible in its name.
                # Only the four output paths below need to be changed; I keep them all in one folder.
                # The files are opened in append mode ('a') on purpose: the loop adds one point at a
                # time, so 'w+' would keep only the last point written. Consequently, before rerunning
                # the script, delete the existing files in the output folder (or switch folders).
                aaa = np.argwhere(cur_pred_val[i] == 0)
                for j in aaa:
                    # 02691156_0_xxx holds the points of object 02691156 predicted as label 0;
                    # xxx is the object's index in the json split file, since labels 0 and 1
                    # have to be separated individually for every file.
                    res1 = open(r'E:\02691156_0_' + str(i) + '.txt', 'a')
                    res1.write('\n' + str(points1[i, j]))
                    res1.close()
                    xxxxxx = xxxxxx + 1
                bbb = np.argwhere(cur_pred_val[i] == 1)
                for j in bbb:
                    res2 = open(r'E:\02691156_1_' + str(i) + '.txt', 'a')  # likewise, points of 02691156 with label 1
                    res2.write('\n' + str(points1[i, j]))
                    res2.close()
                    xxxxxx = xxxxxx + 1  # point counter for debugging, not otherwise used
                ccc = np.argwhere(cur_pred_val[i] == 2)
                for j in ccc:
                    res3 = open(r'E:\03797390_2_' + str(i) + '.txt', 'a')  # points of 03797390 with label 2
                    res3.write('\n' + str(points1[i, j]))
                    res3.close()
                    xxxxxx = xxxxxx + 1
                ddd = np.argwhere(cur_pred_val[i] == 3)
                for j in ddd:
                    res4 = open(r'E:\03797390_3_' + str(i) + '.txt', 'a')  # points of 03797390 with label 3
                    res4.write('\n' + str(points1[i, j]))
                    res4.close()
                    xxxxxx = xxxxxx + 1
                print("xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx")  # visual separator; debugging only
            correct = np.sum(cur_pred_val == target)
            total_correct += correct
            total_seen += (cur_batch_size * NUM_POINT)

            for l in range(num_part):
                total_seen_class[l] += np.sum(target == l)
                total_correct_class[l] += (np.sum((cur_pred_val == l) & (target == l)))

            for i in range(cur_batch_size):
                segp = cur_pred_val[i, :]
                segl = target[i, :]
                cat = seg_label_to_cat[segl[0]]
                part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
                for l in seg_classes[cat]:
                    if (np.sum(segl == l) == 0) and (
                            np.sum(segp == l) == 0):  # part is not present, no prediction as well
                        part_ious[l - seg_classes[cat][0]] = 1.0
                    else:
                        part_ious[l - seg_classes[cat][0]] = np.sum((segl == l) & (segp == l)) / float(
                            np.sum((segl == l) | (segp == l)))
                shape_ious[cat].append(np.mean(part_ious))

        all_shape_ious = []
        for cat in shape_ious.keys():
            for iou in shape_ious[cat]:
                all_shape_ious.append(iou)
            shape_ious[cat] = np.mean(shape_ious[cat])
        mean_shape_ious = np.mean(list(shape_ious.values()))
        test_metrics['accuracy'] = total_correct / float(total_seen)
        test_metrics['class_avg_accuracy'] = np.mean(
            np.array(total_correct_class) / np.array(total_seen_class, dtype=np.float64))
        for cat in sorted(shape_ious.keys()):
            log_string('eval mIoU of %s %f' % (cat + ' ' * (14 - len(cat)), shape_ious[cat]))
        test_metrics['class_avg_iou'] = mean_shape_ious
        test_metrics['instance_avg_iou'] = np.mean(all_shape_ious)

    print(xxxxxx)  # total number of points written; debugging only
    log_string('Accuracy is: %.5f' % test_metrics['accuracy'])
    log_string('Class avg accuracy is: %.5f' % test_metrics['class_avg_accuracy'])
    log_string('Class avg mIOU is: %.5f' % test_metrics['class_avg_iou'])
    log_string('Instance avg mIOU is: %.5f' % test_metrics['instance_avg_iou'])


if __name__ == '__main__':
    args = parse_args()
    main(args)
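One caveat about the export loop: res1.write('\n' + str(points1[i, j])) writes the repr of a numpy slice, brackets included, because each j produced by np.argwhere is itself a 1-element array. If you want plain "x y z ..." rows that other tools can read directly, here is a hedged alternative sketch for one label (same variables as in the loop above; the output path is an example, not a fixed convention):

# Sketch: write the points of shape i predicted as part label 0 as plain
# space-separated rows instead of numpy reprs.
idx = np.argwhere(cur_pred_val[i] == 0).flatten()  # flatten to plain integer indices
with open(r'E:\02691156_0_' + str(i) + '.txt', 'a') as res:
    for j in idx:
        res.write(' '.join('%f' % v for v in points1[i, j]) + '\n')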

Finally, the file-handling (data preprocessing) code:

The first script builds the entries for a train/test split file from a category folder:

import os

filePath = "D:\\Learning\\无人机项目\\Pointnet2\\Pointnet2\\data\\shapenetcore_partanno_segmentation_benchmark_v0_normal\\03797390\\"

entries = []
for i, j, k in os.walk(filePath):
    for name in k:
        base_name = os.path.splitext(name)[0]  # strip the .txt extension
        entries.append('"' + os.path.join("shape_data/03797390/", base_name) + '"')

# Joining with commas avoids the trailing comma after the last entry; the original
# version wrote one comma per entry, and the final comma had to be deleted by hand.
with open('1.txt', 'a') as f:
    f.write("[" + ", ".join(entries) + "]")
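If you would rather not manage the quoting and commas by hand at all, a sketch using the json module works too (assuming the same folder layout; synset_id and the output file name are examples matching the shuffled_*_file_list.json files that PartNormalDataset reads):

import json
import os

# Hypothetical example paths; point data_dir at your own category folder.
synset_id = '03797390'
data_dir = 'data/shapenetcore_partanno_segmentation_benchmark_v0_normal/' + synset_id

names = [os.path.splitext(n)[0] for n in sorted(os.listdir(data_dir)) if n.endswith('.txt')]
entries = ['shape_data/%s/%s' % (synset_id, n) for n in names]

with open('shuffled_test_file_list.json', 'w') as f:
    json.dump(entries, f)  # json handles the quoting and commas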
The second script removes the all-zero padding points from every file in a category folder, rewriting each file in place:

# -*- coding:utf-8 -*-
import os

filePath = 'D:\\Learning\\无人机项目\\Pointnet2\\Pointnet2\\data\\shapenetcore_partanno_segmentation_benchmark_v0_normal\\02691156\\'
for root_dir, dirs, files in os.walk(filePath):
    for name in files:
        list1 = []
        with open(filePath + name) as src:
            for line in src:
                a = line.split()
                b = a[0:6]
                a1 = float(a[0])
                a2 = float(a[1])
                a3 = float(a[2])
                if a1 == 0 and a2 == 0 and a3 == 0:  # skip padded (0, 0, 0) points
                    continue
                list1.append(b)
        with open(filePath + name, 'w+') as file:
            for row in list1:
                file.write(' '.join(row))  # writes every kept column (the original dropped the last one)
                if row != list1[-1]:       # the original compared against the builtin `list`, a bug
                    file.write('\n')

# An earlier, commented-out variant is kept below for reference. Besides filtering
# zero points, it remapped the original ShapeNet Mug part labels 36/37 down to 2/3:
# import os
# filePath = '161865156110305.txt'
# for i, j, k in os.walk(filePath):
#     for name in k:
#         print(name)
#         f = open(filePath + name)  # open the txt file
#         line = f.readline()        # read the file line by line
#         list1 = []
#         while line:
#             a = line.split()
#             b = a[0:3]             # the columns selected for reading/modification
#             c = float(a[-1])
#             a1 = float(a[0])
#             a2 = float(a[1])
#             a3 = float(a[2])
#             if (a1 == 0 and a2 == 0 and a3 == 0)
#                 print(c)
#             if (float(a[-1]) == 36.0):
#                 c = 2
#             if (float(a[-1]) == 37.0):
#                 c = 3
#             b.append(c)
#             list1.append(b)        # append the row to the list
#             line = f.readline()
#         f.close()
#         print(list1)
#         with open(filePath + name, 'w+') as file:
#             for i in list1:
#                 file.write(str(i[0]))
#                 file.write(' ' + str(i[1]))
#                 file.write(' ' + str(i[2]))
#                 file.write(' ' + str(i[3]))
#                 if (i != list[-1]):
#                     file.write('\n')
#             file.close()
#         path_out = 'test.txt'      # new txt file
#         with open(path_out, 'w+') as f_out:
#             for i in list1:
#                 fir = '9443_' + i[0]   # prefix the first column with '9443_'
#                 sec = 9443 + int(i[1]) # add 9443 to every value in the second column
#                 f_out.write(fir + ' ' + str(sec) + '\n')  # write the first two columns to the new file
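For orientation, PartNormalDataset expects the standard ShapeNet-part layout, which these scripts are preparing. With the two categories used here, the data directory looks roughly like this (a sketch; the shape file names are examples):

data/shapenetcore_partanno_segmentation_benchmark_v0_normal/
├── 02691156/                  # one folder per category (synset id)
│   ├── <shape_id>.txt         # one shape per file: 'x y z nx ny nz label' per point
│   └── ...
├── 03797390/
│   └── ...
├── synsetoffset2category.txt  # maps each category name to its synset id
└── train_test_split/
    ├── shuffled_train_file_list.json
    ├── shuffled_val_file_list.json
    └── shuffled_test_file_list.json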

Wishing everyone success!
