import logging
import math
import os

import torch
from torch import nn
import torch.nn.functional as F  # needed for F.relu6 in HSwish


class HSwish(nn.Module):
    def forward(self, x):
        out = x * F.relu6(x + 3, inplace=True) / 6
        return out


'''
GhostModule is a plug-and-play module: with stride=1 the output spatial size is
unchanged and only the channel count changes.
'''
class GhostModule(nn.Module):
    def __init__(self, inp, oup, kernel_size=1, ratio=4, dw_size=3, stride=1, relu=True):
        super(GhostModule, self).__init__()
        self.oup = oup
        init_channels = math.ceil(oup / ratio)      # round up: the compressed channel count
        new_channels = init_channels * (ratio - 1)  # new_channels < oup

        '''
        if oup / ratio is an integer:
            init_channels + new_channels == oup
        if oup / ratio is not an integer:
            init_channels + new_channels > oup
        '''
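        # Worked example of the channel arithmetic above (ratio=4):
        #   oup=64 -> init_channels=16, new_channels=48, 16+48=64  (exact)
        #   oup=30 -> init_channels=8,  new_channels=24, 8+24=32>30 (forward() slices off the extra 2)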

        self.primary_conv = nn.Sequential(
            # ordinary conv; spatial size preserved when stride=1
            nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False),
            nn.BatchNorm2d(init_channels),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

        self.cheap_operation = nn.Sequential(
            # depthwise conv (groups=init_channels): the cheap "ghost" feature generator
            nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size // 2, groups=init_channels, bias=False),
            nn.BatchNorm2d(new_channels),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        out = torch.cat([x1, x2], dim=1)
        return out[:, :self.oup, :, :]  # the concatenation can have more than oup channels, so keep only the first oup
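# Quick sanity check (hypothetical usage; shapes follow from the defaults above):
#   gm = GhostModule(inp=16, oup=64, ratio=4, stride=1)
#   y = gm(torch.randn(2, 16, 32, 32))
#   y.shape -> torch.Size([2, 64, 32, 32]): spatial size kept, channels 16 -> 64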


class ConvBNACT(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, act=None):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)
        if act == "relu":
            self.act = nn.ReLU()
        elif act == "hard_swish":
            self.act = HSwish()
        elif act is None:
            self.act = None
        else:
            # previously an unknown act silently left self.act unset, crashing in forward()
            raise NotImplementedError("Unsupported activation: {}".format(act))

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x


class ConvBNACTWithPool(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, groups=1, act=None):
        super().__init__()
        # ceil_mode=True: keep the leftover border and pool it separately
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)
        if act is None:
            self.act = None
        else:
            self.act = nn.ReLU()

    def forward(self, x):
        x = self.pool(x)
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x
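# Note: avg-pool followed by a stride-1 1x1 conv is the ResNet-vd ("D") downsample
# trick: it halves the spatial size without the information loss of a stride-2 1x1 conv.
# E.g. (N, C, 160, 160) -> pool -> (N, C, 80, 80) -> conv -> (N, out_channels, 80, 80).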


class ShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride, name, if_first=False):
        super().__init__()
        assert name is not None, "Shortcut must have name"
        self.name = name
        if in_channels != out_channels or stride != 1:
            if if_first:
                self.conv = GhostModule(inp=in_channels, oup=out_channels, stride=stride)
            else:
                self.conv = ConvBNACTWithPool(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                    groups=1,
                    act=None
                )
        elif if_first:
            self.conv = GhostModule(inp=in_channels, oup=out_channels, stride=stride)
        else:
            self.conv = None  # identity shortcut

    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        return x


class BottleneckBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, "Bottleneck must have name"
        self.name = name
        self.conv0 = GhostModule(inp=in_channels, oup=out_channels)
        self.conv1 = GhostModule(inp=out_channels, oup=out_channels, stride=stride)
        self.conv2 = GhostModule(inp=out_channels, oup=out_channels * 4)
        self.shortcut = ShortCut(
            in_channels=in_channels,
            out_channels=out_channels * 4,
            stride=stride,
            if_first=if_first,
            name=f"{name}_branch1"
        )
        self.relu = nn.ReLU()
        self.output_channels = out_channels * 4

    def forward(self, x):
        out = self.conv0(x)
        out = self.conv1(out)
        out = self.conv2(out)
        out = out + self.shortcut(x)
        return self.relu(out)
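# Channel flow through one bottleneck (each conv of a standard ResNet bottleneck
# is replaced by a GhostModule):
#   in_channels -> out_channels -> out_channels (stride applied here) -> 4*out_channels
# The shortcut maps in_channels -> 4*out_channels so the residual addition lines up.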


class BasicBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, "Block must have name"
        self.name = name
        self.conv0 = GhostModule(inp=in_channels, oup=out_channels, stride=stride)
        self.conv1 = GhostModule(inp=out_channels, oup=out_channels)
        self.shortcut = ShortCut(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            name=f"{name}_branch1",
            if_first=if_first
        )
        self.relu = nn.ReLU()
        self.output_channels = out_channels

    def forward(self, x):
        out = self.conv0(x)
        out = self.conv1(out)
        out = out + self.shortcut(x)
        return self.relu(out)


class ResNet(nn.Module):
    def __init__(self, in_channels, layers, pretrained=True, **kwargs):
        super().__init__()
        supported_layers = {
            18: {"depth": [2, 2, 2, 2], "block_class": BasicBlock},
            34: {"depth": [3, 4, 6, 3], "block_class": BasicBlock},
            50: {"depth": [3, 4, 6, 3], "block_class": BottleneckBlock},
            101: {"depth": [3, 4, 23, 3], "block_class": BottleneckBlock},
            152: {"depth": [3, 8, 36, 3], "block_class": BottleneckBlock},
            200: {"depth": [3, 12, 48, 3], "block_class": BottleneckBlock}
        }
        assert layers in supported_layers, "Supported layers are {} but input layer is {}".format(list(supported_layers.keys()), layers)
        depth = supported_layers[layers]["depth"]
        block_class = supported_layers[layers]["block_class"]

        # num_filters = [64, 128, 256, 512]  # standard ResNet widths
        num_filters = [32, 64, 128, 256]     # halved widths, shrinking the model further
        # num_filters = [16, 32, 64, 128]

        # stem: three GhostModules, the first with stride 2
        self.conv1 = nn.Sequential(
            GhostModule(inp=in_channels, oup=32, stride=2),
            GhostModule(inp=32, oup=32),
            GhostModule(inp=32, oup=64),
        )

        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.stages = nn.ModuleList()
        self.out_channels = []
        in_ch = 64
        for block_index in range(len(depth)):  # e.g. for ResNet50: depth = [3, 4, 6, 3]
            block_list = []
            for i in range(depth[block_index]):
                if layers >= 50:
                    if layers in [101, 152, 200] and block_index == 2:
                        if i == 0:
                            conv_name = "res" + str(block_index + 2) + "a"
                        else:
                            conv_name = "res" + str(block_index + 2) + "b" + str(i)
                    else:
                        # chr() returns the character for an ASCII code; 97 is 'a'
                        conv_name = "res" + str(block_index + 2) + chr(97 + i)
                else:
                    conv_name = "res{}{}".format(str(block_index + 2), chr(97 + i))
                temp_stride = 1
                if i == 0 and block_index != 0:
                    # downsample at the first block of every stage except the first
                    temp_stride = 2
                block_list.append(
                    block_class(
                        in_channels=in_ch,
                        out_channels=num_filters[block_index],
                        stride=temp_stride,
                        if_first=block_index == i == 0,
                        name=conv_name
                    )
                )
                in_ch = block_list[-1].output_channels
            self.out_channels.append(in_ch)
            self.stages.append(nn.Sequential(*block_list))
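        # The names follow the Caffe-style ResNet convention, e.g. ResNet50 stage 0
        # yields res2a, res2b, res2c, while the long third stage of ResNet101/152/200
        # is named res4a, res4b1, res4b2, ... rather than running through the alphabet.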

        if pretrained:
            ckpt_path = "./configs/pretrain/imagenet/resnet{}_vd.pth".format(layers)
            logger = logging.getLogger("networks/backbones/DetResNetvd.py")
            if os.path.exists(ckpt_path):
                logger.info("Load imagenet weights")
                self.load_state_dict(torch.load(ckpt_path))
            else:
                logger.info("{} does not exist".format(ckpt_path))

    def forward(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        out = []
        for stage in self.stages:
            x = stage(x)
            out.append(x)
        return out
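# The forward pass returns one feature map per stage, at overall strides 4/8/16/32.
# For ResNet(in_channels=3, layers=50) on a (2, 3, 640, 640) input this works out to:
#   [2, 128, 160, 160], [2, 256, 80, 80], [2, 512, 40, 40], [2, 1024, 20, 20]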


if __name__ == "__main__":
    # neckblok = BottleneckBlock(in_channels=3, out_channels=32, stride=2, if_first=True, name="1")
    # baseblok = BasicBlock(in_channels=3, out_channels=32, stride=2, if_first=True, name="2")
    # x = torch.randn(2, 3, 640, 640)
    # y = neckblok(x)
    # print(y.shape)
    # yy = baseblok(x)
    # print(yy.shape)

    # GHost_Resnet50 = ResNet(in_channels=3, layers=50)
    # GHost_Resnet18 = ResNet(in_channels=3, layers=18)
    # yyy = GHost_Resnet50(x)
    # print(yyy[0].shape, yyy[1].shape, yyy[2].shape, yyy[3].shape)
    # yyyy = GHost_Resnet18(x)
    # print(yyyy[0].shape, yyyy[1].shape, yyyy[2].shape, yyyy[3].shape)

    from torchsummaryX import summary
    # from thop import profile
    # from thop import clever_format
    # myNet50 = ResNet(in_channels=3, layers=50).cuda()  # instantiate the network; swap in your own here
    # input = torch.randn(1, 3, 640, 640).cuda()
    # macs, params = profile(myNet50, inputs=(input,))
    # macs, params = clever_format([macs, params], "%.3f")
    # print(macs, params)
    x = torch.randn(2, 3, 640, 640).cuda()
    myNet50 = ResNet(in_channels=3, layers=50).cuda()  # instantiate the network; swap in your own here
    summary(myNet50, torch.randn(1, 3, 640, 640).cuda())  # print the network structure
    yyy = myNet50(x)
    print(yyy[0].shape, yyy[1].shape, yyy[2].shape, yyy[3].shape)

    # summary(myNet50, (3, 640, 640))  # print the network structure

Parameter and FLOPs test

Network                 Params   FLOPs
ResNet50                23M      35G
ResNet18                11M      16G
GHost-ResNet50 (s=2)    8M       11G
GHost-ResNet50 (s=4)    5M       7G
GHost-ResNet18 (s=2)    813K     1.25G
GHost-ResNet18 (s=4)    516K     827M

The structure defined here differs slightly from the original, so the numbers deviate a little, but that does not keep it from compressing the model, and it works well!
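For reference, a minimal sketch of how numbers like these can be reproduced with thop, the same tool used in the commented-out profiling code in the listing. The module name DetResNetvd is an assumption (taken from the logger name above), and s in the table is assumed to correspond to the ratio argument of GhostModule:

import torch
from thop import profile, clever_format

from DetResNetvd import ResNet  # assuming the listing above is saved as DetResNetvd.py

model = ResNet(in_channels=3, layers=50, pretrained=False)
dummy = torch.randn(1, 3, 640, 640)

# parameter count with plain PyTorch
n_params = sum(p.numel() for p in model.parameters())
print("params: {:.1f}M".format(n_params / 1e6))

# MACs and params with thop
macs, params = profile(model, inputs=(dummy,))
print(clever_format([macs, params], "%.3f"))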