Every convolutional layer requires calling Conv2d followed by a ReLU activation, and writing both calls each time is tedious, so they are packaged together into a single class.
in_channels: depth of the input feature matrix, passed in as a parameter
out_channels: depth of the output feature matrix, passed in as a parameter
The forward pass sends the input through the convolution layer and the ReLU activation to produce the output.
import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicConv2d(nn.Module):
    def __init__(self, in_channels, out_channels, **kwargs):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, **kwargs)
        self.relu = nn.ReLU(inplace=True)

    # define the forward pass
    def forward(self, x):
        x = self.conv(x)
        x = self.relu(x)
        return x
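For instance (a minimal sketch, not part of the original post), a BasicConv2d with a 1 x 1 kernel changes only the channel depth:

layer = BasicConv2d(3, 64, kernel_size=1)
print(layer(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 64, 224, 224])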
def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
in_channels: number of channels of the input feature matrix
ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj: the corresponding values from the parameter table.
branch1 through branch4 correspond to the four branches of the Inception block. The first branch is a single 1 x 1 convolution; branches 2, 3, and 4 each contain a 1 x 1 convolution that performs dimensionality reduction.
All of these parameters come from the parameter table of the GoogLeNet paper.
class Inception(nn.Module):
    def __init__(self, in_channels, ch1x1, ch3x3red, ch3x3, ch5x5red, ch5x5, pool_proj):
        super(Inception, self).__init__()
        self.branch1 = BasicConv2d(in_channels, ch1x1, kernel_size=1)

        self.branch2 = nn.Sequential(
            BasicConv2d(in_channels, ch3x3red, kernel_size=1),
            BasicConv2d(ch3x3red, ch3x3, kernel_size=3, padding=1)  # padding=1 keeps the output size equal to the input size
        )

        self.branch3 = nn.Sequential(
            BasicConv2d(in_channels, ch5x5red, kernel_size=1),
            BasicConv2d(ch5x5red, ch5x5, kernel_size=5, padding=2)  # padding=2 keeps the output size equal to the input size
        )

        self.branch4 = nn.Sequential(
            nn.MaxPool2d(kernel_size=3, stride=1, padding=1),
            BasicConv2d(in_channels, pool_proj, kernel_size=1)
        )

    def forward(self, x):  # feed the input feature matrix into branch1-4 in parallel
        branch1 = self.branch1(x)
        branch2 = self.branch2(x)
        branch3 = self.branch3(x)
        branch4 = self.branch4(x)
        # collect the outputs in a list
        outputs = [branch1, branch2, branch3, branch4]
        # concatenate the four output feature matrices along the channel dimension; "1" is the dimension to concatenate on
        return torch.cat(outputs, 1)
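Since all four branches preserve the spatial size and are concatenated along the channel dimension, the output depth is simply ch1x1 + ch3x3 + ch5x5 + pool_proj. A quick sanity check (a minimal sketch, not part of the original post; the parameters are those of inception3a):

# inception3a: 64 + 128 + 32 + 32 = 256 output channels
block = Inception(192, 64, 96, 128, 16, 32, 32)
x = torch.randn(1, 192, 28, 28)  # N x C x H x W
print(block(x).shape)            # torch.Size([1, 256, 28, 28])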
def __init__(self, in_channels, num_classes): takes the number of channels of the input feature matrix and the number of classes to predict.
Input to auxiliary classifier 1: N x 512 x 14 x 14; input to auxiliary classifier 2: N x 528 x 14 x 14.
After the average-pooling downsampling layer, the height and width become 4 x 4:
auxiliary classifier 1: N x 512 x 4 x 4; auxiliary classifier 2: N x 528 x 4 x 4.
x = F.dropout(x, 0.5, training=self.training)
randomly zeroes elements of the input feature matrix at a rate of 50%.
training=self.training
Once a model is instantiated, its state can be switched with model.train() and model.eval(): in model.train() mode self.training=True, and in model.eval() mode self.training=False, so dropout is only active during training.
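To illustrate the toggle (a minimal sketch, not part of the original post):

demo = nn.Dropout(0.5)
demo.train()          # training mode: self.training=True, dropout is active
print(demo.training)  # True
demo.eval()           # evaluation mode: self.training=False, dropout becomes a no-op
print(demo.training)  # False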
# auxiliary classifier
class InceptionAux(nn.Module):
    def __init__(self, in_channels, num_classes):
        super(InceptionAux, self).__init__()
        self.averagePool = nn.AvgPool2d(kernel_size=5, stride=3)  # average-pooling downsampling layer
        self.conv = BasicConv2d(in_channels, 128, kernel_size=1)  # 1 x 1 convolution layer

        # fully connected layers
        self.fc1 = nn.Linear(2048, 1024)
        self.fc2 = nn.Linear(1024, num_classes)

    # define the forward pass
    def forward(self, x):
        # aux1: N x 512 x 14 x 14, aux2: N x 528 x 14 x 14
        x = self.averagePool(x)
        # aux1: N x 512 x 4 x 4, aux2: N x 528 x 4 x 4
        x = self.conv(x)
        # N x 128 x 4 x 4
        x = torch.flatten(x, 1)  # flatten everything from dimension 1 onward, keeping the batch dimension
        x = F.dropout(x, 0.5, training=self.training)
        # N x 2048
        x = F.relu(self.fc1(x), inplace=True)
        x = F.dropout(x, 0.5, training=self.training)
        # N x 1024
        x = self.fc2(x)
        # N x num_classes
        return x
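A quick shape check (a minimal sketch, not part of the original post), using the aux1 input size:

aux = InceptionAux(512, num_classes=1000)
x = torch.randn(2, 512, 14, 14)
print(aux(x).shape)  # torch.Size([2, 1000])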
def __init__(self, num_classes=1000, aux_logits=True, init_weights=False):
num_classes: number of classification categories
aux_logits: whether to use the auxiliary classifiers
init_weights: whether to initialize the weights
self.aux_logits = aux_logits: stores the argument as an instance variable
conv1, conv2, and conv3 are all built from the BasicConv2d template defined earlier.
The inception blocks use the Inception class defined above.
self.avgpool = nn.AdaptiveAvgPool2d((1, 1)): adaptive average-pooling downsampling;
the tuple specifies the desired output height and width.
The benefit of adaptive average pooling: whatever the height and width of the input feature matrix, the output always has the specified height and width, so the input image size no longer has to be fixed.
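For example (a minimal sketch, not part of the original post), two different spatial sizes both pool down to 1 x 1:

pool = nn.AdaptiveAvgPool2d((1, 1))
print(pool(torch.randn(1, 1024, 7, 7)).shape)   # torch.Size([1, 1024, 1, 1])
print(pool(torch.randn(1, 1024, 13, 9)).shape)  # torch.Size([1, 1024, 1, 1])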
class GoogleNet(nn.Module):
    def __init__(self, num_classes=1000, aux_logits=True, init_weights=False):
        super(GoogleNet, self).__init__()
        self.aux_logits = aux_logits

        self.conv1 = BasicConv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool1 = nn.MaxPool2d(3, stride=2, ceil_mode=True)
        self.conv2 = BasicConv2d(64, 64, kernel_size=1)
        self.conv3 = BasicConv2d(64, 192, kernel_size=3, padding=1)
        self.maxpool2 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        self.inception3a = Inception(192, 64, 96, 128, 16, 32, 32)
        self.inception3b = Inception(256, 128, 128, 192, 32, 96, 64)
        self.maxpool3 = nn.MaxPool2d(3, stride=2, ceil_mode=True)

        self.inception4a = Inception(480, 192, 96, 208, 16, 48, 64)
        self.inception4b = Inception(512, 160, 112, 224, 24, 64, 64)
        self.inception4c = Inception(512, 128, 128, 256, 24, 64, 64)
        self.inception4d = Inception(512, 112, 144, 288, 32, 64, 64)
        self.inception4e = Inception(528, 256, 160, 320, 32, 128, 128)
        self.maxpool4 = nn.MaxPool2d(2, stride=2, ceil_mode=True)

        self.inception5a = Inception(832, 256, 160, 320, 32, 128, 128)
        self.inception5b = Inception(832, 384, 192, 384, 48, 128, 128)

        if self.aux_logits:  # if the auxiliary classifiers are enabled
            self.aux1 = InceptionAux(512, num_classes)  # create auxiliary classifier 1
            self.aux2 = InceptionAux(528, num_classes)  # create auxiliary classifier 2

        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
        self.dropout = nn.Dropout(0.4)  # random dropout at a rate of 40%
        self.fc = nn.Linear(1024, num_classes)
        if init_weights:
            self._initialize_weights()  # if requested, run the weight-initialization routine
The forward pass chains the entire layer structure together.
if self.training and self.aux_logits:
    aux2 = self.aux2(x)
checks whether the model is in training or evaluation mode, and whether the auxiliary classifiers are enabled.
    def forward(self, x):
        x = self.conv1(x)
        x = self.maxpool1(x)
        x = self.conv2(x)
        x = self.conv3(x)
        x = self.maxpool2(x)

        x = self.inception3a(x)
        x = self.inception3b(x)
        x = self.maxpool3(x)
        x = self.inception4a(x)
        if self.training and self.aux_logits:  # auxiliary classifier 1 branches off after inception4a
            aux1 = self.aux1(x)

        x = self.inception4b(x)
        x = self.inception4c(x)
        x = self.inception4d(x)
        if self.training and self.aux_logits:  # auxiliary classifier 2 branches off after inception4d
            aux2 = self.aux2(x)

        x = self.inception4e(x)
        x = self.maxpool4(x)
        x = self.inception5a(x)
        x = self.inception5b(x)

        x = self.avgpool(x)
        x = torch.flatten(x, 1)
        x = self.dropout(x)
        x = self.fc(x)

        if self.training and self.aux_logits:  # in training mode with the auxiliary classifiers enabled
            return x, aux2, aux1  # return the main classifier, auxiliary classifier 2, and auxiliary classifier 1 outputs
        return x  # otherwise return only the main classifier's output
    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):  # convolution layers
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.Linear):  # fully connected layers
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.constant_(m.bias, 0)
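Putting it together: during training the three outputs are typically combined, with the two auxiliary losses down-weighted by 0.3 as in the GoogLeNet paper. A minimal usage sketch (not part of the original post; the optimizer, learning rate, and dummy batch are illustrative assumptions):

net = GoogleNet(num_classes=5, aux_logits=True, init_weights=True)
loss_function = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(net.parameters(), lr=0.0003)

net.train()
images = torch.randn(8, 3, 224, 224)  # dummy batch; real code would use a DataLoader
labels = torch.randint(0, 5, (8,))

logits, aux_logits2, aux_logits1 = net(images)
loss = (loss_function(logits, labels)
        + 0.3 * loss_function(aux_logits2, labels)
        + 0.3 * loss_function(aux_logits1, labels))
optimizer.zero_grad()
loss.backward()
optimizer.step()

net.eval()  # in eval mode the forward pass returns only the main output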
The code runs without errors and the model trains successfully.