当前位置:   article > 正文

PyTorch 椎骨检测之分类模型（脊柱背部识别）

这几天用resnet50跑了椎骨检测的第一个分类模型,验证集准确率最高有77%

给力的队友已经将每一张大的椎骨图像做好定位并切成小块,做好类别标记并数据增强了,弱鸡的我只要把数据放进分类模型就好

不说了,上代码吧

首先是数据集处理的load_dataset.py

这里用到的是https://blog.csdn.net/qq_40356092/article/details/108472127的代码

  1. import torch
  2. import torchvision
  3. from torchvision import datasets, transforms
  4. #对训练集做一个变换
  5. train_transforms = transforms.Compose([
  6. transforms.RandomResizedCrop(224), #对图片尺寸做一个缩放切割
  7. transforms.RandomHorizontalFlip(), #水平翻转
  8. transforms.ToTensor(), #转化为张量
  9. transforms.Normalize((.5, .5, .5), (.5, .5, .5)) #进行归一化
  10. ])
  11. #对测试集做变换
  12. test_transforms = transforms.Compose([
  13. transforms.RandomResizedCrop(224),
  14. transforms.ToTensor(),
  15. transforms.Normalize((.5, .5, .5), (.5, .5, .5))
  16. ])
  17. def load_local_dataset(dataset_dir, ratio = 0.8, batch_size = 256):
  18. #获取数据集
  19. all_datasets = datasets.ImageFolder(dataset_dir, transform=train_transforms)
  20. #将数据集划分成训练集和测试集
  21. train_size=int(ratio * len(all_datasets))
  22. test_size=len(all_datasets) - train_size
  23. train_datasets, test_datasets = torch.utils.data.random_split(all_datasets, [train_size, test_size])
  24. train_iter = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
  25. test_iter = torch.utils.data.DataLoader(test_datasets, batch_size=batch_size, shuffle=True)
  26. return train_iter,test_iter
  27. def load_train_test_dataset(train_dir, test_dir , batch_size = 256):
  28. #获取数据集
  29. train_datasets = datasets.ImageFolder(train_dir, transform=train_transforms)
  30. test_datasets = datasets.ImageFolder(test_dir, transform=test_transforms)
  31. train_iter = torch.utils.data.DataLoader(train_datasets, batch_size=batch_size, shuffle=True)
  32. test_iter = torch.utils.data.DataLoader(test_datasets, batch_size=batch_size, shuffle=True)
  33. return train_iter,test_iter

然后就是用GPU开始跑resnet50了,之所以跑50是因为显卡带不动resnet152,害

先是头部分

  1. import torch
  2. import os
  3. import load_dataset
  4. import torch.nn as nn
  5. import torchvision.datasets as dsets
  6. import torchvision.transforms as transforms
  7. from torch.autograd import Variable
  8. from collections import OrderedDict
  9. import pandas as pd
  10. os.environ['CUDA_VISIBLE_DEVICES'] = "0,1,2,3" #这里的赋值必须是字符串,list会报错
  11. device_ids=[0,1,2,3]
  12. train_measure_results = []
  13. test_measure_results = []
  14. def dprint(d):
  15. out = []
  16. for k, v in d.items():
  17. out.append(f"{k}: {v:0.4f}")
  18. print(", ".join(out))

下面开始加载模型

  1. #加载模型
  2. #定义两层的残差块
  3. class Residual_2(nn.Module):
  4. def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):
  5. super(Residual_2, self).__init__()
  6. #两个3*3的卷积层
  7. self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1, stride=stride)
  8. self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
  9. #1*1的卷积保证维度一致
  10. if use_1x1conv:
  11. self.conv3 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
  12. else:
  13. self.conv3 = None
  14. #BN层
  15. self.bn1 = nn.BatchNorm2d(out_channels)
  16. self.bn2 = nn.BatchNorm2d(out_channels)
  17. def forward(self, X):
  18. Y = self.conv1(X)
  19. Y = self.bn1(Y)
  20. Y = torch.nn.functional.relu(Y)
  21. Y = self.conv2(Y)
  22. Y = self.bn2(Y)
  23. if self.conv3:
  24. X = self.conv3(X)
  25. return torch.nn.functional.relu(Y + X)
  26. #定义三层的残差块
  27. class Residual_3(nn.Module):
  28. def __init__(self, in_channels, out_channels, use_1x1conv=False, stride=1):
  29. super(Residual_3, self).__init__()
  30. #三层卷积层
  31. self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
  32. self.conv2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)
  33. self.conv3 = nn.Conv2d(out_channels, out_channels*4, kernel_size=1)
  34. #1*1的卷积保证维度一致
  35. if use_1x1conv:
  36. self.conv4 = nn.Conv2d(in_channels, out_channels*4, kernel_size=1, stride=stride)
  37. else:
  38. self.conv4 = None
  39. #BN层
  40. self.bn1 = nn.BatchNorm2d(out_channels)
  41. self.bn2 = nn.BatchNorm2d(out_channels)
  42. self.bn3 = nn.BatchNorm2d(out_channels*4)
  43. def forward(self, X):
  44. Y = self.conv1(X)
  45. Y = self.bn1(Y)
  46. Y = torch.nn.functional.relu(Y)
  47. Y = self.conv2(Y)
  48. Y = self.bn2(Y)
  49. Y = torch.nn.functional.relu(Y)
  50. Y = self.conv3(Y)
  51. Y = self.bn3(Y)
  52. if self.conv4:
  53. X = self.conv4(X)
  54. return torch.nn.functional.relu(Y + X)
  55. classes=7
  56. #平铺
  57. class FlattenLayer(nn.Module):
  58. def __init__(self):
  59. super(FlattenLayer, self).__init__()
  60. def forward(self, input):
  61. return input.view(input.size(0), -1)
  62. #全局平均池化层
  63. class GlobalAvgPool2d(nn.Module):
  64. def __init__(self):
  65. super(GlobalAvgPool2d, self).__init__()
  66. def forward(self, x):
  67. return nn.functional.avg_pool2d(x, kernel_size=x.size()[2:])
  68. def resnet_block(in_channels, out_channels, num_residuals, basicblock=2, first_block=False):
  69. blk = []
  70. for i in range(num_residuals):
  71. if basicblock == 2:
  72. if i == 0 and first_block == False :
  73. blk.append(Residual_2(in_channels, out_channels, use_1x1conv=True, stride=2))
  74. else :
  75. blk.append(Residual_2(out_channels, out_channels))
  76. else:
  77. if i==0:
  78. if first_block:
  79. blk.append(Residual_3(in_channels, out_channels, use_1x1conv=True))
  80. else :
  81. blk.append(Residual_3(in_channels*4, out_channels, use_1x1conv=True, stride=2))
  82. else:
  83. blk.append(Residual_3(out_channels*4, out_channels, use_1x1conv=True))
  84. return nn.Sequential(*blk)
  85. # 定义resnet网络
  86. def ResNet_model(layers):
  87. #前两层
  88. net = nn.Sequential(
  89. # 7*7的卷积层
  90. nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3),
  91. nn.BatchNorm2d(64),
  92. nn.ReLU(),
  93. # 3*3的最大池化层
  94. nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
  95. )
  96. #定义不同结构的ResNet
  97. if layers == 18:
  98. basicblock=2
  99. num_residual=[2,2,2,2]
  100. elif layers == 34:
  101. basicblock=2
  102. num_residual=[3,4,6,3]
  103. elif layers == 50:
  104. basicblock=3
  105. num_residual=[3,4,6,3]
  106. elif layers == 101:
  107. basicblock=3
  108. num_residual=[3,4,23,3]
  109. elif layers == 152:
  110. basicblock=3
  111. num_residual=[3,8,36,3]
  112. else :
  113. exit("ResNet结构不对!")
  114. #添加block
  115. net.add_module("resnet_block1", resnet_block(64, 64, num_residual[0], basicblock, first_block=True))
  116. net.add_module("resnet_block2", resnet_block(64, 128, num_residual[1], basicblock))
  117. net.add_module("resnet_block3", resnet_block(128, 256, num_residual[2], basicblock))
  118. net.add_module("resnet_block4", resnet_block(256, 512, num_residual[3], basicblock))
  119. #添加平均池化层、全连接层
  120. net.add_module("global_avg_pool", GlobalAvgPool2d())
  121. if basicblock==2:
  122. net.add_module("fc", nn.Sequential(FlattenLayer(), nn.Linear(512, classes)))
  123. else:
  124. net.add_module("fc", nn.Sequential(FlattenLayer(), nn.Linear(2048, classes)))
  125. return net
  126. resnet = ResNet_model(50)
  127. resnet=torch.nn.DataParallel(resnet,device_ids=device_ids)
  128. resnet = resnet.cuda()
  129. print("model loaded...")
  130. #加载数据集
  131. ratio=0.8
  132. batch_size=256
  133. #加载MNIST数据集,图片大小为28x28x1,记得修改网络结构
  134. # root="E:/数据集"
  135. # train_iter,test_iter=load_dataset.load_FashionMNIST(root,batch_size)
  136. #加载训练集和测试集
  137. # train_dir = "E:/数据集/rice_diseases/train"
  138. # test_dir = "E:/数据集/rice_diseases/test"
  139. # train_iter, test_iter = load_dataset.load_train_test_dataset(train_dir, test_dir, batch_size)

导入我们的训练集和测试集,这里训练集和测试集是放在一起的,其中文件的格式按照https://blog.csdn.net/qq_40356092/article/details/108472127的格式处理好就好

  1. #训练集和测试集在一个文件夹下
  2. dataset_dir = "./all_image"
  3. train_iter,test_iter=load_dataset.load_local_dataset(dataset_dir,ratio,batch_size)
  4. print("data loaded...")
  5. print("训练集=",len(train_iter))
  6. print("测试集=",len(test_iter))
  7. #定义损失函数和优化器
  8. lr,num_epochs =0.001, 200
  9. loss = torch.nn.CrossEntropyLoss().cuda() #损失函数
  10. optimizer = torch.optim.Adam(resnet.parameters(), lr=lr) #优化器
  11. def train(net, train_iter, test_iter, optimizer, loss, num_epochs):
  12. for epoch in range(num_epochs):
  13. # 训练过程
  14. net.train() # 启用 BatchNormalization 和 Dropout
  15. train_l_sum, train_acc_sum, train_num = 0.0, 0.0, 0
  16. for X, y in train_iter:
  17. X = X.cuda()
  18. y = y.cuda()
  19. y_hat = net(X)
  20. l = loss(y_hat, y).sum()
  21. optimizer.zero_grad()
  22. l.backward()
  23. optimizer.step()
  24. #计算准确率
  25. train_l_sum += l.item()
  26. train_acc_sum += (y_hat.argmax(dim=1) == y).sum().item()
  27. train_num += y.shape[0]
  28. print('epoch %d, loss %.4f, train acc %.3f' % (epoch + 1, train_l_sum / train_num, train_acc_sum / train_num))
  29. train_results = OrderedDict()
  30. train_results['loss'] = train_l_sum / train_num
  31. train_results['train_acc'] = train_acc_sum / train_num
  32. dprint(train_results)
  33. train_measure_results.append(train_results)
  34. # 测试过程
  35. if (epoch+1) %5 == 0:
  36. test_acc_sum, test_num= 0.0, 0
  37. with torch.no_grad(): #不会求梯度、反向传播
  38. net.eval() # 不启用 BatchNormalization 和 Dropout
  39. for X,y in test_iter:
  40. X = X.cuda()
  41. y = y.cuda()
  42. test_acc_sum += (net(X).argmax(dim=1) == y).float().sum().item()
  43. test_num += y.shape[0]
  44. print('test acc %.3f' % (test_acc_sum / test_num))
  45. test_results = OrderedDict()
  46. test_results['test_acc'] = test_acc_sum / test_num
  47. dprint(test_results)
  48. test_measure_results.append(test_results)
  49. torch.save(net.module.state_dict(), f'./checkpoint/model_{str(epoch + 1).zfill(4)}.pt') # 保存模型
  50. df_train = pd.DataFrame(train_measure_results)
  51. df_test = pd.DataFrame(test_measure_results)
  52. df_train.to_csv(f"train_result.csv")
  53. df_test.to_csv(f"test_result.csv")
  54. train(resnet, train_iter, test_iter, optimizer, loss, num_epochs)

然后十分感谢这几篇文章,代码都是在这几篇文章的基础上修改的:

使用自己的数据导入pytorch多GPU训练(其实自己还是没有解决其中一个显卡占比多的问题),还有很多resnet的模型代码,就不一一列举了

最后说说代码的问题：由于有些部分没有优化，跑一个 epoch 要一分钟左右，还是比较慢的。

我好菜啊

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/小惠珠哦/article/detail/965637
推荐阅读
相关标签
  

闽ICP备14008679号