For ResNet-50, an input image of shape (1, 3, 224, 224) is first compressed by the stem (a conv stage, written as a small Sequential in some implementations) to (1, 64, 56, 56), then fed through layer1 -> layer4, finally producing (1, 2048, 7, 7), as shown below.
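These shapes are easy to verify (a quick check, assuming torchvision is installed; the hand-written ResNet later in this post reproduces the same trunk):

import torch
from torchvision.models import resnet50

net = resnet50()
x = torch.randn(1, 3, 224, 224)
x = net.maxpool(net.relu(net.bn1(net.conv1(x))))  # stem
print(x.shape)  # torch.Size([1, 64, 56, 56])
for layer in (net.layer1, net.layer2, net.layer3, net.layer4):
    x = layer(x)
print(x.shape)  # torch.Size([1, 2048, 7, 7])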
More ResNet details
Why residual connections are introduced (1)
Why residual connections are introduced (2)
Sometimes you may wonder why the code is written a particular way; that comes entirely from ResNet-50's own structure. For a different model, the classes would likely not be written like this at all.
ResNet-50 characteristics
conv block: has a conv on the shortcut branch (downsample=True); the input and output channel counts differ, so conv blocks cannot be chained back to back; their job is to increase the channel count.
identity block: no conv on the shortcut; the channel count is unchanged, so they can be chained; used to deepen the network.
inplanes: input channels
planes: channels used in the middle of the block
planes * expansion: output channels
downsample: whether to put a conv on the residual branch; downsample=True means add the conv
Characteristics of the Bottleneck class:
1. The main (left) branch has three convs:
conv1: conv(input, mid), channel count changes
conv2: conv(mid, mid), channel count unchanged
conv3: conv(mid, out), channel count changes
2. Every layer starts with one conv block; all blocks after it are identity blocks.
3. As for stride, every identity block uses stride 1, while the conv block's stride varies (see the sketch after this list).
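To make the channel bookkeeping concrete, here is what the two block types of layer1 look like when built by hand (a minimal sketch that assumes the Bottleneck class from the full listing further below):

import torch
from torch import nn

# conv block: 64 -> 64 -> 64*4 = 256, with a 1x1 conv + BN on the shortcut
downsample = nn.Sequential(
    nn.Conv2d(64, 256, kernel_size=1, stride=1, bias=False),
    nn.BatchNorm2d(256),
)
conv_block = Bottleneck(inplanes=64, planes=64, stride=1, downsample=downsample)
# identity block: 256 -> 64 -> 256, plain shortcut, so it can be chained freely
identity_block = Bottleneck(inplanes=256, planes=64)

x = torch.randn(1, 64, 56, 56)
print(identity_block(conv_block(x)).shape)  # torch.Size([1, 256, 56, 56])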
The Bottleneck class generates either a conv block or an identity block from the figure above. Its code is fairly easy to follow against the network structure diagram; what I found more involved is the _make_layer process, so I am recording it here.
A layer is built by calling layer1 = self._make_layer(block, 64, layers[0], stride=1).
Inside _make_layer, a downsample = nn.Sequential(...) branch is built when needed, and the conv block is created with block(self.inplanes=64, planes, stride, downsample) and appended to the layers list. The remaining identity blocks are then created with block(self.inplanes, planes) and appended as well, and the function ends with return nn.Sequential(*layers).
nn.Sequential chains the independent residual blocks in the layers list head to tail, in the order they were appended, so the whole layer produces a single output.
Finding a specific layer of the network:
list(resnet.children())[4][0].conv1
# use .children() to get the modules in list form
resnet.layer1[0].conv1
# or locate the layer directly by reading print(resnet)
This also confirms how layer1 is built: several Bottlenecks are appended to a list, the list becomes layer1 via nn.Sequential, and an individual conv is then reached as resnet.layer1[0].conv1, as the probe below shows.
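A small probe (assuming resnet = ResNet(block=Bottleneck) as in the test code further below):

blocks = list(resnet.layer1)
print(len(blocks))  # 3 Bottlenecks in layer1
print([b.downsample is not None for b in blocks])  # [True, False, False]: only block 0 is a conv block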
Figure: number of residual blocks in each layer.
Code:
Note the line self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride).
The stride of the last layer is normally 2, but stride=2 shrinks the feature map from (14, 14) to (7, 7). Some researchers argue that such a small feature map carries too little spatial information, so the stride is made configurable here and can be set to 1 when needed.
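The effect of last_stride is easy to verify (a quick check using the ResNet class from the listing below):

import torch

img = torch.randn(1, 3, 224, 224)
print(ResNet(last_stride=2)(img).shape)  # torch.Size([1, 2048, 7, 7])
print(ResNet(last_stride=1)(img).shape)  # torch.Size([1, 2048, 14, 14])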
from torch import nn
import torch

"""
conv block: has a conv on the shortcut branch (downsample=True); input and output
channels differ, so conv blocks cannot be chained; they increase the channel count.
identity block: no conv on the shortcut; channels unchanged, can be chained to deepen
the network.
inplanes: input channels
planes: channels in the middle of the block
planes * expansion: output channels
downsample: whether to put a conv on the residual branch (downsample=True: add it)
Bottleneck characteristics:
1. The main branch has three convs:
   conv1: conv(input, mid)  - channels change
   conv2: conv(mid, mid)    - channels unchanged
   conv3: conv(mid, out)    - channels change
2. Every layer starts with a conv block; all blocks after it are identity blocks.
3. Every identity block has stride 1; the conv block's stride varies.
"""

class Bottleneck(nn.Module):  # block class: builds either a conv block or an identity block
    expansion = 4  # channel expansion factor: the output channels are 4x the mid channels

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        # inplanes: input channels; planes: mid channels. Only (in, mid, stride) are
        # needed; the other strides are fixed, and out_channels = planes * expansion.
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)  # (256,64) (64,64) (64,64*4)
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)

        # identity block: pass-through shortcut; conv block: convolve the residual
        # branch to change its channels and size
        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out

class ResNet(nn.Module):
    def __init__(self, last_stride=2, block=Bottleneck, layers=[3, 4, 6, 3]):
        # layers gives the number of blocks (conv block + identity blocks) per layer
        self.inplanes = 64  # the stem produces 64 channels
        super().__init__()
        # stem
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)  # add missed relu
        self.maxpool = nn.MaxPool2d(kernel_size=2, stride=None, padding=0)
        self.layer1 = self._make_layer(block, 64, layers[0], stride=1)
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=last_stride)
        # planes = 64, 128, 256, 512 are the mid channel counts
        # blocks_num = layers[0] = 3 means layer1 holds 3 residual blocks (conv + identity)
        # stride is the stride of the layer's first conv block, applied on both branches

    def _make_layer(self, block, planes, blocks_num, stride=1):
        downsample = None  # the shortcut-branch downsample module
        # a conv is needed on the shortcut when stride != 1, and also when the output
        # channel count differs from the input channel count
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )

        layers = []  # holds this layer's residual blocks
        # every layer goes through the conv block first, then the identity blocks,
        # so the conv block is appended first; inplanes was set at ResNet init,
        # (planes, stride) are passed in by the _make_layer call
        layers.append(block(self.inplanes, planes, stride, downsample))
        # planes is the mid channel count; this line sets the input channel count
        # for the next residual block
        self.inplanes = planes * block.expansion
        # identity blocks: blocks_num says how many blocks this layer needs in total
        for i in range(1, blocks_num):
            layers.append(block(self.inplanes, planes))

        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)  # add missed relu
        x = self.maxpool(x)

        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)

        return x

if __name__ == '__main__':
    resnet = ResNet(block=Bottleneck)
    # layer1 test:
    # print(resnet.layer1)
    # x = torch.Tensor(1, 64, 56, 56)
    # layer1 = resnet.layer1
    # print(layer1(x).shape)  # torch.Size([1, 256, 56, 56]), matching layer1's output shape
    img = torch.Tensor(1, 3, 224, 224)
    out = resnet(img)
    print(out.shape)
resnet101 and resnet152 can be generated by directly calling the ResNet class written above.
Analysis:
These networks all consist of 4 layers; they differ only in how many blocks each layer holds. Because only the first block of each layer is a conv block and all the rest are identity blocks (which can be chained indefinitely to deepen the network), simply changing the block counts yields resnet101 and resnet152.
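For example, the resnet101 backbone only changes layers[2] from 6 to 23 (a sketch reusing the ResNet and Bottleneck classes above):

# 3 convs per Bottleneck, so depth = 3*(3+4+23+3) + stem conv + final fc = 101
resnet101_backbone = ResNet(block=Bottleneck, layers=[3, 4, 23, 3])
# likewise resnet50: 3*(3+4+6+3) + 2 = 50, and resnet152: 3*(3+8+36+3) + 2 = 152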
def resnet50(last_stride, num_classes=1000, **kwargs):  # assemble the model
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    # the block-count list controls the depth; note this assumes a ResNet variant
    # that accepts num_classes (i.e. one that carries a classification head)
    model = ResNet(last_stride, SEBottleneck, [3, 4, 6, 3],
                   num_classes=num_classes, **kwargs)
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model

def resnet101(last_stride, num_classes=1000, **kwargs):
    model = ResNet(last_stride, SEBottleneck, [3, 4, 23, 3],
                   num_classes=num_classes, **kwargs)
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model

def resnet152(last_stride, num_classes):
    model = ResNet(last_stride, SEBottleneck, [3, 8, 36, 3], num_classes=num_classes)
    model.avgpool = nn.AdaptiveAvgPool2d(1)
    return model
import torch.nn as nn
import torch

class SELayer(nn.Module):
    def __init__(self, channel, reduction=16):
        super(SELayer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Sequential(
            nn.Linear(channel, int(channel / reduction), bias=False),
            nn.ReLU(inplace=True),
            nn.Linear(int(channel / reduction), channel, bias=False),
            nn.Sigmoid()
        )

    def forward(self, x):
        # self.avg_pool(x) has shape (32, 30, 1, 1); that cannot feed a Linear
        # directly, hence the reshaping below
        b, c, _, _ = x.size()
        y = self.avg_pool(x).view(b, c)   # (32, 30)
        y = self.fc(y).view(b, c, 1, 1)   # (32, 30, 1, 1)
        return x * y.expand_as(x)

if __name__ == '__main__':
    x = torch.Tensor(32, 30, 256, 128)
    SE = SELayer(30, reduction=2)
    SE(x)
import torch.nn as nn
import torch

class SEModule(nn.Module):
    def __init__(self, channels, reduction):  # reduction is the squeeze ratio c/r
        super(SEModule, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.fc1 = nn.Conv2d(channels, channels // reduction, kernel_size=1, padding=0)
        self.relu = nn.ReLU(inplace=True)
        self.fc2 = nn.Conv2d(channels // reduction, channels, kernel_size=1, padding=0)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        module_input = x
        x = self.avg_pool(x)
        x = self.fc1(x)
        x = self.relu(x)
        x = self.fc2(x)
        x = self.sigmoid(x)
        return module_input * x
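The two SE variants compute the same function: a Linear applied to the pooled (b, c) vector is exactly a 1x1 conv applied to the (b, c, 1, 1) tensor. A quick check (a sketch assuming both classes above are in scope; SEModule's convs carry biases that SELayer's Linears lack, so they are zeroed first):

import torch

se_lin = SELayer(channel=32, reduction=4)   # hidden size 32 / 4 = 8
se_conv = SEModule(channels=32, reduction=4)
with torch.no_grad():
    # a Linear weight of shape (out, in) is a 1x1 conv kernel of shape (out, in, 1, 1)
    se_conv.fc1.weight.copy_(se_lin.fc[0].weight.view(8, 32, 1, 1))
    se_conv.fc2.weight.copy_(se_lin.fc[2].weight.view(32, 8, 1, 1))
    se_conv.fc1.bias.zero_()
    se_conv.fc2.bias.zero_()
x = torch.randn(2, 32, 16, 16)
print(torch.allclose(se_lin(x), se_conv(x), atol=1e-6))  # True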
# 1. Split along dim 0 into chunks of size 2; returns a tuple of tensors
x = torch.rand(4, 8, 6)
y = torch.split(x, 2, dim=0)
# If x1 = torch.rand(5, 8, 6), then torch.split(x1, 2, dim=0) yields
# 3 chunks of sizes 2, 2, 1

# 2. Split along dim 1 into chunks of sizes [2, 3, 3]
y = torch.split(x, [2, 3, 3], dim=1)
# split the 30 channels in half, giving two (32, 15, 256, 128) tensors,
# processed by IN and BN respectively
class IBN(nn.Module):
    def __init__(self, planes):
        super(IBN, self).__init__()
        half1 = int(planes / 2)
        self.half = half1
        half2 = planes - half1
        self.IN = nn.InstanceNorm2d(half1, affine=True)
        self.BN = nn.BatchNorm2d(half2)

    def forward(self, x):
        split = torch.split(x, self.half, 1)  # two (32, 15, 256, 128) tensors
        out1 = self.IN(split[0].contiguous())
        out2 = self.BN(split[1].contiguous())
        out = torch.cat((out1, out2), 1)
        return out

if __name__ == '__main__':
    x = torch.Tensor(32, 30, 256, 128)
    IBN = IBN(30)
    IBN(x)
How do we fuse SE and IBN into the bottleneck used to build the ResNet?
class SEBottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None, ibn=False, reduction=16):
        super(SEBottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        if ibn:
            # IBN is only worth considering after the first conv of a block,
            # i.e. in the shallow part
            self.bn1 = IBN(planes)
        else:
            self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # the block's output channel count is SE's input channel count
        self.se = SELayer(planes * 4, reduction)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x

        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)
        out = self.se(out)  # SE sits on the main branch; the downsample branch gets no SE

        if self.downsample is not None:
            residual = self.downsample(x)

        out += residual
        out = self.relu(out)

        return out
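A minimal smoke test for the fused block (assuming the SELayer and IBN classes from the snippets above are in scope):

import torch
from torch import nn

downsample = nn.Sequential(
    nn.Conv2d(64, 256, kernel_size=1, stride=1, bias=False),
    nn.BatchNorm2d(256),
)
block = SEBottleneck(inplanes=64, planes=64, stride=1, downsample=downsample, ibn=True)
x = torch.randn(2, 64, 56, 56)
print(block(x).shape)  # torch.Size([2, 256, 56, 56])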