
[Notes] CIFAR-10: Testing VGG / ResNet / MobileNet / InceptionNet


To get familiar with neural networks, this post builds simplified versions of several large network architectures and tests them on CIFAR-10.

1. Train.py

Note: to switch networks, you only need to change the name of the network being instantiated.
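The original post does not list Train.py itself. Below is a minimal sketch of what such a script could look like, assuming the Train/ image folder produced in section 2 and the network constructors defined in sections 3 to 6; the module name vgg, the batch size, the learning rate, and the epoch count are all illustrative assumptions, not the author's actual settings.

    # Minimal training sketch (assumptions: Train/ folder from section 2,
    # network constructors from sections 3-6, illustrative hyperparameters).
    import torch
    import torch.nn as nn
    import torchvision.transforms as transforms
    from torch.utils.data import DataLoader
    from torchvision.datasets import ImageFolder

    from vgg import VGGNet  # assumed module layout; swap in resnet / mobilenetv1_small / InceptionNetSmall

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    transform = transforms.Compose([
        transforms.RandomCrop(28),  # VGGbase expects 28x28; use 32x32 for the other nets
        transforms.ToTensor(),
    ])
    train_set = ImageFolder("Train", transform=transform)  # folder written in section 2
    train_loader = DataLoader(train_set, batch_size=128, shuffle=True)

    net = VGGNet().to(device)  # change only this line to switch networks
    # VGGbase ends in log_softmax, so NLLLoss matches it; the other nets
    # below return raw logits and would pair with CrossEntropyLoss instead.
    criterion = nn.NLLLoss()
    optimizer = torch.optim.Adam(net.parameters(), lr=0.001)

    for epoch in range(10):
        for images, labels in train_loader:
            images, labels = images.to(device), labels.to(device)
            optimizer.zero_grad()
            loss = criterion(net(images), labels)
            loss.backward()
            optimizer.step()
        print("epoch {}: last batch loss {:.4f}".format(epoch, loss.item()))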

2. Using the CIFAR-10 dataset

First, the downloaded dataset has to be unpacked and converted into image files:

    # Convert the CIFAR-10 batch files downloaded from the official site into
    # images saved under newly created Train and Test folders.
    import pickle
    import glob
    import os
    import cv2
    import numpy as np

    # Returns a dict holding the raw image data and the labels.
    def unpickle(file):
        with open(file, 'rb') as fo:
            data_dict = pickle.load(fo, encoding='bytes')
        return data_dict

    label_name = ["airplane",
                  "automobile",
                  "bird",
                  "cat",
                  "deer",
                  "dog",
                  "frog",
                  "horse",
                  "ship",
                  "truck"]

    # Use glob to match the batch files under the dataset folder.
    train_list = glob.glob("<change-to-your-own-folder>/test_batch*")  # data_batch_* --> test_batch*
    print(train_list)
    save_path = "<change-to-your-own-folder>/Test"  # Train --> Test

    for l in train_list:
        print(l)
        l_dict = unpickle(l)
        # print(l_dict)  # print everything
        print(l_dict.keys())  # dict_keys([b'batch_label', b'labels', b'data', b'filenames'])

        # enumerate() pairs each item of an iterable (list, tuple, string, ...)
        # with its index, which is handy in a for loop.
        for im_idx, im_data in enumerate(l_dict[b'data']):
            # Each image is stored as a flat vector; the index also gives us
            # its label and filename.
            im_label = l_dict[b'labels'][im_idx]
            im_name = l_dict[b'filenames'][im_idx]
            # print(im_label, im_name, im_data)

            # Reshape the vector to 3x32x32, then move the channel axis last
            # to get a 32x32x3 image, and save it into the new dataset folder.
            im_label_name = label_name[im_label]
            im_data = np.reshape(im_data, [3, 32, 32])
            im_data = np.transpose(im_data, (1, 2, 0))
            # cv2.imshow("im_data", cv2.resize(im_data, (200, 200)))
            # cv2.waitKey(0)

            # Create one sub-folder per class under Train/Test if it
            # does not exist yet.
            if not os.path.exists("{}/{}".format(save_path, im_label_name)):
                os.mkdir("{}/{}".format(save_path, im_label_name))

            # Write out the image; im_name is a bytes object, so decode it
            # to a string first.
            cv2.imwrite("{}/{}/{}".format(save_path,
                                          im_label_name,
                                          im_name.decode("utf-8")),
                        im_data)

Change the folder paths accordingly and run the script twice: once matching data_batch_* and writing to Train, then once matching test_batch and writing to Test.
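As a quick sanity check after both runs (a sketch, assuming the Train/ and Test/ folders sit next to the script), you can count the images written per class; each Train class folder should hold 5,000 images and each Test class folder 1,000:

    # Sanity-check sketch: count converted images per class folder.
    # Assumes Train/ and Test/ are in the current directory and that the
    # CIFAR-10 filenames end in .png, as stored under b'filenames'.
    import glob
    import os

    for split in ("Train", "Test"):
        for cls in sorted(os.listdir(split)):
            n = len(glob.glob(os.path.join(split, cls, "*.png")))
            print(split, cls, n)  # expect 5000 per Train class, 1000 per Test class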

3. VGG

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class VGGbase(nn.Module):
        def __init__(self):
            super(VGGbase, self).__init__()
            # Input: 3 * 28 * 28 (the 32x32 image is cropped/resized to 28 beforehand).
            # nn.Sequential is a container: modules run in the order they are
            # passed to the constructor.
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=3, stride=1, padding=1),  # 3 input channels, 64 output channels
                nn.BatchNorm2d(64),  # batch-normalize the 64 output channels
                nn.ReLU()
            )
            # A 3x3 conv with stride 1 and padding 1 keeps the spatial size;
            # only the channel count changes.
            # 28 * 28
            self.max_pooling1 = nn.MaxPool2d(kernel_size=2, stride=2)

            # 14 * 14
            self.conv2_1 = nn.Sequential(
                nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(128),
                nn.ReLU()
            )
            self.conv2_2 = nn.Sequential(
                nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(128),
                nn.ReLU()
            )
            self.max_pooling2 = nn.MaxPool2d(kernel_size=2, stride=2)

            # 7 * 7
            # The size is odd here, so the pooling below uses padding=1.
            self.conv3_1 = nn.Sequential(
                nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(256),
                nn.ReLU()
            )
            self.conv3_2 = nn.Sequential(
                nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(256),
                nn.ReLU()
            )
            self.max_pooling3 = nn.MaxPool2d(kernel_size=2, stride=2, padding=1)

            # 4 * 4
            self.conv4_1 = nn.Sequential(
                nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(512),
                nn.ReLU()
            )
            self.conv4_2 = nn.Sequential(
                nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1),
                nn.BatchNorm2d(512),
                nn.ReLU()
            )
            self.max_pooling4 = nn.MaxPool2d(kernel_size=2, stride=2)

            # 2 * 2
            # FC layer: batchsize * 512 * 2 * 2 --> batchsize * (512 * 4)
            self.fc = nn.Linear(512 * 4, 10)  # nn.Linear(in_features, out_features)

        # Chain the layers to build the classification network.
        def forward(self, x):
            batchsize = x.size(0)  # number of samples along dim 0
            out = self.conv1(x)
            out = self.max_pooling1(out)

            out = self.conv2_1(out)
            out = self.conv2_2(out)
            out = self.max_pooling2(out)

            out = self.conv3_1(out)
            out = self.conv3_2(out)
            out = self.max_pooling3(out)

            out = self.conv4_1(out)
            out = self.conv4_2(out)
            out = self.max_pooling4(out)

            # Flatten: batchsize * c * h * w --> batchsize * n
            out = out.view(batchsize, -1)  # -1: inferred from the remaining dims
            out = self.fc(out)
            out = F.log_softmax(out, dim=1)
            return out

    def VGGNet():
        return VGGbase()
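A quick shape check (not from the original post) confirms the size annotations in the comments above: a dummy 28x28 batch should come out as a (batch, 10) matrix of log-probabilities.

    # Smoke-test sketch for VGGbase: verify the (batch, 10) output shape.
    import torch

    net = VGGNet()
    x = torch.randn(4, 3, 28, 28)  # fake batch of 4 cropped CIFAR images
    out = net(x)
    print(out.shape)  # expected: torch.Size([4, 10])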

4. ResNet

  1. """
  2. resnet 跳连结构
  3. """
  4. import torch
  5. import torch.nn as nn
  6. import torch.nn.functional as F
  7. class ResBlock(nn.Module):
  8. def __init__(self, in_channel, out_channel, stride=1):
  9. super(ResBlock, self).__init__()
  10. #定义主干分支
  11. self.layer = nn.Sequential(
  12. nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
  13. nn.BatchNorm2d(out_channel),
  14. nn.ReLU(),
  15. nn.Conv2d(out_channel, out_channel, kernel_size=3,stride=1,padding=1),
  16. nn.BatchNorm2d(out_channel),
  17. )
  18. #定义跳连分支
  19. self.shortcut = nn.Sequential()
  20. if in_channel != out_channel or stride > 1:
  21. #跳连分支,将输入的结果直连
  22. self.shortcut=nn.Sequential(
  23. nn.Conv2d(in_channel, out_channel, kernel_size=3, stride=stride, padding=1),
  24. nn.BatchNorm2d(out_channel),
  25. )
  26. def forward(self, x):
  27. out1 = self.layer(x)
  28. out2 = self.shortcut(x)
  29. out = out1 + out2
  30. out = F.relu(out)
  31. return out
  32. class ResNet(nn.Module):
  33. #对多个层的定义
  34. def make_layer(self, block, out_channel, stride, num_block): #num_block表示定义多少个卷积层
  35. layer_list = [] #存放相应的层
  36. for i in range(num_block):
  37. if i == 0:
  38. in_stride = stride
  39. else:
  40. in_stride = 1
  41. layer_list.append(block(self.in_channel, out_channel, in_stride))
  42. self.in_channel = out_channel
  43. return nn.Sequential(*layer_list)
  44. def __init__(self, ResBlock):
  45. super(ResNet, self).__init__()
  46. self.in_channel = 32
  47. self.conv1 = nn.Sequential(
  48. nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),
  49. nn.BatchNorm2d(32),
  50. nn.ReLU()
  51. )
  52. self.layer1 = self.make_layer(ResBlock, 64, 2, 2)
  53. self.layer2 = self.make_layer(ResBlock, 128, 2, 2)
  54. self.layer3 = self.make_layer(ResBlock, 256, 2, 2)
  55. self.layer4 = self.make_layer(ResBlock, 512, 2, 2)
  56. self.fc = nn.Linear(512, 10) #10个类别
  57. def forward(self, x):
  58. out = self.conv1(x)
  59. out = self.layer1(out)
  60. out = self.layer2(out)
  61. out = self.layer3(out)
  62. out = self.layer4(out)
  63. out = F.avg_pool2d(out, 2) #输出结果转换为1 * 1
  64. out = out.view(out.size(0), -1)
  65. out = self.fc(out)
  66. return out
  67. def resnet():
  68. return ResNet(ResBlock)
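Each make_layer stage uses stride 2 only in its first ResBlock, so every stage halves the spatial size exactly once (32 -> 16 -> 8 -> 4 -> 2), and the convolutional shortcut keeps the residual addition shape-compatible. A small sketch (not from the original post) illustrating both:

    # Sketch: a stride-2 ResBlock halves the spatial size and changes channels,
    # while the shortcut conv keeps the two branches addable.
    import torch

    block = ResBlock(in_channel=32, out_channel=64, stride=2)
    print(block(torch.randn(1, 32, 32, 32)).shape)  # torch.Size([1, 64, 16, 16])

    net = resnet()
    print(net(torch.randn(1, 3, 32, 32)).shape)     # torch.Size([1, 10])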

5. MobileNet

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class mobilenet(nn.Module):
        # MobileNet basic unit: depthwise 3x3 conv followed by pointwise 1x1 conv.
        def conv_dw(self, in_channel, out_channel, stride):
            return nn.Sequential(
                nn.Conv2d(in_channel, in_channel, kernel_size=3, stride=stride,
                          padding=1, groups=in_channel, bias=False),
                nn.BatchNorm2d(in_channel),
                nn.ReLU(),
                nn.Conv2d(in_channel, out_channel, kernel_size=1, stride=1,
                          padding=0, bias=False),
                nn.BatchNorm2d(out_channel),
                nn.ReLU(),
            )

        # Define the core operators.
        def __init__(self):
            super(mobilenet, self).__init__()
            self.conv1 = nn.Sequential(
                nn.Conv2d(3, 32, kernel_size=3, stride=1, padding=1),  # 3 input channels, 32 output channels
                nn.BatchNorm2d(32),  # batch-normalize the 32 output channels
                nn.ReLU()
            )
            self.conv_dw2 = self.conv_dw(32, 32, 1)
            self.conv_dw3 = self.conv_dw(32, 64, 2)  # downsample once
            self.conv_dw4 = self.conv_dw(64, 64, 1)
            self.conv_dw5 = self.conv_dw(64, 128, 2)
            self.conv_dw6 = self.conv_dw(128, 128, 1)
            self.conv_dw7 = self.conv_dw(128, 256, 2)
            self.conv_dw8 = self.conv_dw(256, 256, 1)
            self.conv_dw9 = self.conv_dw(256, 512, 2)
            # Four of the conv_dw calls above use stride 2,
            # giving 2^4 = 16x downsampling in total.
            self.fc = nn.Linear(512, 10)

        # Chain the operators for the forward pass.
        def forward(self, x):
            out = self.conv1(x)
            out = self.conv_dw2(out)
            out = self.conv_dw3(out)
            out = self.conv_dw4(out)
            out = self.conv_dw5(out)
            out = self.conv_dw6(out)
            out = self.conv_dw7(out)
            out = self.conv_dw8(out)
            out = self.conv_dw9(out)
            # After 16x downsampling a 32x32 input is 2x2, so average-pool with a 2x2 window.
            out = F.avg_pool2d(out, 2)
            out = out.view(-1, 512)  # flatten to (batch, 512)
            out = self.fc(out)       # map the 512-dim feature to 10 class scores
            return out

    def mobilenetv1_small():
        return mobilenet()
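The point of conv_dw is parameter efficiency: a depthwise 3x3 convolution plus a pointwise 1x1 convolution replaces one dense 3x3 convolution. A small sketch (not from the original post) comparing the two for a 256 -> 512 layer:

    # Sketch: parameter count of a dense 3x3 conv vs the conv_dw block.
    import torch.nn as nn

    def n_params(m):
        return sum(p.numel() for p in m.parameters())

    dense = nn.Conv2d(256, 512, kernel_size=3, padding=1, bias=False)
    dw = mobilenet().conv_dw(256, 512, 1)  # depthwise 3x3 + pointwise 1x1 (+ BN)

    print(n_params(dense))  # 256 * 512 * 3 * 3 = 1,179,648
    print(n_params(dw))     # 2,304 + 512 + 131,072 + 1,024 = 134,912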

6. InceptionNet

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    """
    input: A
    For ResNet:
        B = A + f(A); to guarantee that A and f(A) have matching feature maps,
        B = g(A) + f(A)
        g(A): if the input and output channel counts differ, or stride > 1,
        convolve A so its feature map matches the output size.
    __________________________________________________________________
    For Inception:
        apply different convolutions and pooling operations to the input A,
        then concatenate the results (B1, B2, B3, ...):
        B1 = f1(A)
        B2 = f2(A)
        B3 = f3(A)
        B4 = f4(A)
        out = concat([B1, B2, B3, B4])
    """

    def ConvBNRelu(in_channel, out_channel, kernel_size):
        return nn.Sequential(
            # padding = kernel_size // 2 (integer division) keeps the spatial size:
            # kernel 3 -> padding 1, kernel 5 -> padding 2, kernel 7 -> padding 3.
            nn.Conv2d(in_channel, out_channel, kernel_size=kernel_size,
                      stride=1, padding=kernel_size // 2),
            nn.BatchNorm2d(out_channel),  # batch-normalize the output channels
            nn.ReLU()
        )

    class BaseInception(nn.Module):
        def __init__(self, in_channel, out_channel_list, reduce_channel_list):
            super(BaseInception, self).__init__()
            self.branch1_conv = ConvBNRelu(in_channel, out_channel_list[0], 1)
            self.branch2_conv1 = ConvBNRelu(in_channel, reduce_channel_list[0], 1)  # channel reduction
            self.branch2_conv2 = ConvBNRelu(reduce_channel_list[0], out_channel_list[1], 3)
            self.branch3_conv1 = ConvBNRelu(in_channel, reduce_channel_list[1], 1)
            self.branch3_conv2 = ConvBNRelu(reduce_channel_list[1], out_channel_list[2], 5)
            self.branch4_pool = nn.MaxPool2d(kernel_size=3, stride=1, padding=1)
            self.branch4_conv = ConvBNRelu(in_channel, out_channel_list[3], 3)

        def forward(self, x):
            out1 = self.branch1_conv(x)
            out2 = self.branch2_conv1(x)
            out2 = self.branch2_conv2(out2)
            out3 = self.branch3_conv1(x)
            out3 = self.branch3_conv2(out3)
            out4 = self.branch4_pool(x)
            out4 = self.branch4_conv(out4)
            out = torch.cat([out1, out2, out3, out4], dim=1)
            return out

    class InceptionNet(nn.Module):
        def __init__(self):
            super(InceptionNet, self).__init__()
            self.block1 = nn.Sequential(
                nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=1),
                nn.BatchNorm2d(64),
                nn.ReLU()
            )
            self.block2 = nn.Sequential(
                nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
                nn.BatchNorm2d(128),
                nn.ReLU()
            )
            self.block3 = nn.Sequential(
                BaseInception(in_channel=128,
                              out_channel_list=[64, 64, 64, 64],
                              reduce_channel_list=[16, 16]),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            )
            self.block4 = nn.Sequential(
                BaseInception(in_channel=256,
                              out_channel_list=[96, 96, 96, 96],
                              reduce_channel_list=[32, 32]),
                nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
            )
            self.fc = nn.Linear(384, 10)  # 96 * 4 = 384

        # Assemble the operators for the forward pass.
        def forward(self, x):
            out = self.block1(x)
            out = self.block2(out)
            out = self.block3(out)
            out = self.block4(out)
            out = F.avg_pool2d(out, 2)
            out = out.view(out.size(0), -1)
            out = self.fc(out)
            return out

    def InceptionNetSmall():
        return InceptionNet()
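Because BaseInception concatenates its four branches along the channel dimension, the output channel count is just the sum of out_channel_list: [64, 64, 64, 64] yields 256, which is why block4 takes in_channel=256 and the final fc takes 96 * 4 = 384 features. A quick check (not from the original post):

    # Sketch: the four inception branches concatenate to sum(out_channel_list)
    # channels while the spatial size is unchanged.
    import torch

    inc = BaseInception(in_channel=128,
                        out_channel_list=[64, 64, 64, 64],
                        reduce_channel_list=[16, 16])
    print(inc(torch.randn(1, 128, 7, 7)).shape)   # torch.Size([1, 256, 7, 7])

    net = InceptionNetSmall()
    print(net(torch.randn(1, 3, 32, 32)).shape)   # torch.Size([1, 10])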

Results on the test set (screenshots not preserved): VGG, ResNet, MobileNet, InceptionNet.
