Implementing Vgg16Net in PyTorch

from torch import nn


class Vgg16Net(nn.Module):
    def __init__(self):
        super(Vgg16Net, self).__init__()

        # Block 1: two convolutional layers and one max-pooling layer
        self.layer1 = nn.Sequential(
            # In: 3 channels, 3x3 kernel, out: 64 channels
            # (e.g. a 32*32*3 input image: (32 + 2*1 - 3)/1 + 1 = 32, so the output is 32*32*64)
            nn.Conv2d(3, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            # In: 64 channels, 3x3 kernel, out: 64 channels (input 32*32*64, kernel 3*3*64*64, output 32*32*64)
            nn.Conv2d(64, 64, 3, padding=1),
            nn.BatchNorm2d(64),
            nn.ReLU(inplace=True),
            # Input 32*32*64, output 16*16*64
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Block 2: two convolutional layers and one max-pooling layer
        self.layer2 = nn.Sequential(
            # In: 64 channels, 3x3 kernel, out: 128 channels (input 16*16*64, kernel 3*3*64*128, output 16*16*128)
            nn.Conv2d(64, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            # In: 128 channels, 3x3 kernel, out: 128 channels (input 16*16*128, kernel 3*3*128*128, output 16*16*128)
            nn.Conv2d(128, 128, 3, padding=1),
            nn.BatchNorm2d(128),
            nn.ReLU(inplace=True),
            # Input 16*16*128, output 8*8*128
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Block 3: three convolutional layers and one max-pooling layer
        self.layer3 = nn.Sequential(
            # In: 128 channels, 3x3 kernel, out: 256 channels (input 8*8*128, kernel 3*3*128*256, output 8*8*256)
            nn.Conv2d(128, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            # In: 256 channels, 3x3 kernel, out: 256 channels (input 8*8*256, kernel 3*3*256*256, output 8*8*256)
            nn.Conv2d(256, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            # In: 256 channels, 3x3 kernel, out: 256 channels (input 8*8*256, kernel 3*3*256*256, output 8*8*256)
            nn.Conv2d(256, 256, 3, padding=1),
            nn.BatchNorm2d(256),
            nn.ReLU(inplace=True),
            # Input 8*8*256, output 4*4*256
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Block 4: three convolutional layers and one max-pooling layer
        self.layer4 = nn.Sequential(
            # In: 256 channels, 3x3 kernel, out: 512 channels (input 4*4*256, kernel 3*3*256*512, output 4*4*512)
            nn.Conv2d(256, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            # In: 512 channels, 3x3 kernel, out: 512 channels (input 4*4*512, kernel 3*3*512*512, output 4*4*512)
            nn.Conv2d(512, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            # In: 512 channels, 3x3 kernel, out: 512 channels (input 4*4*512, kernel 3*3*512*512, output 4*4*512)
            nn.Conv2d(512, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            # Input 4*4*512, output 2*2*512
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Block 5: three convolutional layers and one max-pooling layer
        self.layer5 = nn.Sequential(
            # In: 512 channels, 3x3 kernel, out: 512 channels (input 2*2*512, kernel 3*3*512*512, output 2*2*512)
            nn.Conv2d(512, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            # In: 512 channels, 3x3 kernel, out: 512 channels (input 2*2*512, kernel 3*3*512*512, output 2*2*512)
            nn.Conv2d(512, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            # In: 512 channels, 3x3 kernel, out: 512 channels (input 2*2*512, kernel 3*3*512*512, output 2*2*512)
            nn.Conv2d(512, 512, 3, padding=1),
            nn.BatchNorm2d(512),
            nn.ReLU(inplace=True),
            # Input 2*2*512, output 1*1*512
            nn.MaxPool2d(kernel_size=2, stride=2)
        )
        # Stack the five blocks into a single feature extractor
        self.conv_layer = nn.Sequential(
            self.layer1,
            self.layer2,
            self.layer3,
            self.layer4,
            self.layer5
        )

        # Classifier: three fully connected layers ending in 1000 classes, as in the original VGG16
        self.fc = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),

            nn.Linear(4096, 4096),
            nn.ReLU(inplace=True),
            nn.Dropout(),

            nn.Linear(4096, 1000)
        )

    def forward(self, x):
        x = self.conv_layer(x)
        # Flatten the 1*1*512 feature map into a (batch, 512) vector
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
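
A quick smoke test (a minimal sketch; the 4-image batch and torch.randn input are illustrative, matching the 32*32 size in the shape comments above) runs a dummy input through the network and checks the output shape:

import torch

model = Vgg16Net()
dummy = torch.randn(4, 3, 32, 32)  # batch of 4 RGB images at the assumed 32*32 size
logits = model(dummy)
print(logits.shape)  # expected: torch.Size([4, 1000])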

Note: the network in this post takes a 32*32*3 input, so the convolutional stack outputs a 1*1*512 feature map. The original network takes a 224*224*3 input and outputs a 7*7*512 feature map.
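
To recover the original 224*224 behavior, only the classifier head needs resizing: after five 2x poolings, 224/32 = 7, so the conv stack ends in a 7*7*512 map. A minimal sketch (the name fc_224 is illustrative, not from the original post):

from torch import nn

# Fully connected head sized for the original 224*224*3 input,
# where the conv stack ends in a 7*7*512 feature map:
fc_224 = nn.Sequential(
    nn.Linear(512 * 7 * 7, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, 4096),
    nn.ReLU(inplace=True),
    nn.Dropout(),
    nn.Linear(4096, 1000)
)
# In forward(), x.view(x.size(0), -1) would then yield a (batch, 512*7*7) tensor.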
Reference: https://blog.csdn.net/aa330233789/article/details/106411301?utm_medium=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-8.nonecase&depth_1-utm_source=distribute.pc_relevant.none-task-blog-BlogCommendFromMachineLearnPai2-8.nonecase
