
GHost-ResNet Implementation: Parameter Count and Compute Test


 

GHost-ResNet Implementation

The implementation below swaps the standard ResNet convolutions for GhostModule blocks; a GhostModule is a plug-and-play module that keeps the spatial size of its input and only changes the channel count.

import logging
import math
import os

import torch
from torch import nn
from torch.nn import functional as F  # needed for F.relu6 in HSwish (missing in the original listing)


class HSwish(nn.Module):
    def forward(self, x):
        out = x * F.relu6(x + 3, inplace=True) / 6
        return out


class GhostModule(nn.Module):
    """
    GhostModule is a plug-and-play module: the spatial size is unchanged
    (for stride=1) and only the channel count changes.
    """
    def __init__(self, inp, oup, kernel_size=1, ratio=4, dw_size=3, stride=1, relu=True):
        super(GhostModule, self).__init__()
        self.oup = oup
        init_channels = math.ceil(oup / ratio)       # round up: compressed channel count
        new_channels = init_channels * (ratio - 1)   # new_channels < oup
        # If oup / ratio is an integer:   init_channels + new_channels == oup
        # If oup / ratio is fractional:   init_channels + new_channels > oup
        self.primary_conv = nn.Sequential(
            nn.Conv2d(inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False),  # spatial size kept (when stride=1)
            nn.BatchNorm2d(init_channels),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )
        self.cheap_operation = nn.Sequential(
            nn.Conv2d(init_channels, new_channels, dw_size, 1, dw_size // 2, groups=init_channels, bias=False),
            nn.BatchNorm2d(new_channels),
            nn.ReLU(inplace=True) if relu else nn.Sequential(),
        )

    def forward(self, x):
        x1 = self.primary_conv(x)
        x2 = self.cheap_operation(x1)
        out = torch.cat([x1, x2], dim=1)
        # The concatenated channel count can exceed oup, so keep only the first oup channels.
        return out[:, :self.oup, :, :]


class ConvBNACT(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, stride=1, padding=0, groups=1, act=None):
        super().__init__()
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=stride,
            padding=padding,
            groups=groups,
            bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)
        if act == "relu":
            self.act = nn.ReLU()
        elif act == "hard_swish":
            self.act = HSwish()
        elif act is None:
            self.act = None

    def forward(self, x):
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x


class ConvBNACTWithPool(nn.Module):
    def __init__(self, in_channels, out_channels, kernel_size, groups=1, act=None):
        super().__init__()
        # ceil_mode=True: keep the leftover border and pool it separately
        self.pool = nn.AvgPool2d(kernel_size=2, stride=2, padding=0, ceil_mode=True)
        self.conv = nn.Conv2d(
            in_channels=in_channels,
            out_channels=out_channels,
            kernel_size=kernel_size,
            stride=1,
            padding=(kernel_size - 1) // 2,
            groups=groups,
            bias=False
        )
        self.bn = nn.BatchNorm2d(out_channels)
        if act is None:
            self.act = None
        else:
            self.act = nn.ReLU()

    def forward(self, x):
        x = self.pool(x)
        x = self.conv(x)
        x = self.bn(x)
        if self.act is not None:
            x = self.act(x)
        return x


class ShortCut(nn.Module):
    def __init__(self, in_channels, out_channels, stride, name, if_first=False):
        super().__init__()
        assert name is not None, "Shortcut must have name"
        self.name = name
        if in_channels != out_channels or stride != 1:
            if if_first:
                self.conv = GhostModule(inp=in_channels, oup=out_channels, stride=stride)
            else:
                self.conv = ConvBNACTWithPool(
                    in_channels=in_channels,
                    out_channels=out_channels,
                    kernel_size=1,
                    groups=1,
                    act=None
                )
        elif if_first:
            self.conv = GhostModule(inp=in_channels, oup=out_channels, stride=stride)
        else:
            self.conv = None

    def forward(self, x):
        if self.conv is not None:
            x = self.conv(x)
        return x


class BottleneckBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, "Bottleneck must have name"
        self.name = name
        self.conv0 = GhostModule(inp=in_channels, oup=out_channels)
        self.conv1 = GhostModule(inp=out_channels, oup=out_channels, stride=stride)
        self.conv2 = GhostModule(inp=out_channels, oup=out_channels * 4)
        self.shortcut = ShortCut(
            in_channels=in_channels,
            out_channels=out_channels * 4,
            stride=stride,
            if_first=if_first,
            name=f"{name}_branch1"
        )
        self.relu = nn.ReLU()
        self.output_channels = out_channels * 4

    def forward(self, x):
        out = self.conv0(x)
        out = self.conv1(out)
        out = self.conv2(out)
        out = out + self.shortcut(x)
        return self.relu(out)


class BasicBlock(nn.Module):
    def __init__(self, in_channels, out_channels, stride, if_first, name):
        super().__init__()
        assert name is not None, "Block must have name"
        self.name = name
        self.conv0 = GhostModule(inp=in_channels, oup=out_channels, stride=stride)
        self.conv1 = GhostModule(inp=out_channels, oup=out_channels)
        self.shortcut = ShortCut(
            in_channels=in_channels,
            out_channels=out_channels,
            stride=stride,
            name=f"{name}_branch1",
            if_first=if_first
        )
        self.relu = nn.ReLU()
        self.output_channels = out_channels

    def forward(self, x):
        out = self.conv0(x)
        out = self.conv1(out)
        out = out + self.shortcut(x)
        return self.relu(out)


class ResNet(nn.Module):
    def __init__(self, in_channels, layers, pretrained=True, **kwargs):
        super().__init__()
        supported_layers = {
            18: {"depth": [2, 2, 2, 2], "block_class": BasicBlock},
            34: {"depth": [3, 4, 6, 3], "block_class": BasicBlock},
            50: {"depth": [3, 4, 6, 3], "block_class": BottleneckBlock},
            101: {"depth": [3, 4, 23, 3], "block_class": BottleneckBlock},
            152: {"depth": [3, 8, 36, 3], "block_class": BottleneckBlock},
            200: {"depth": [3, 12, 48, 3], "block_class": BottleneckBlock}
        }
        assert layers in supported_layers, \
            "Supported layers are {} but input layer is {}".format(supported_layers, layers)
        depth = supported_layers[layers]["depth"]
        block_class = supported_layers[layers]["block_class"]
        # num_filters = [64, 128, 256, 512]
        num_filters = [32, 64, 128, 256]
        # num_filters = [16, 32, 64, 128]
        self.conv1 = nn.Sequential(
            GhostModule(inp=in_channels, oup=32, stride=2),
            GhostModule(inp=32, oup=32),
            GhostModule(inp=32, oup=64),
        )
        self.pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.stages = nn.ModuleList()
        self.out_channels = []
        in_ch = 64
        for block_index in range(len(depth)):  # e.g. for ResNet50
            block_list = []
            for i in range(depth[block_index]):
                if layers >= 50:
                    if layers in [101, 152, 200] and block_index == 2:
                        if i == 0:
                            conv_name = "res" + str(block_index + 2) + "a"
                        else:
                            conv_name = "res" + str(block_index + 2) + "b" + str(i)
                    else:
                        # chr(97) == 'a'; chr returns the character for an ASCII code point
                        conv_name = "res" + str(block_index + 2) + chr(97 + i)
                else:
                    conv_name = "res{}{}".format(str(block_index + 2), chr(97 + i))
                temp_stride = 1
                if i == 0 and block_index != 0:
                    temp_stride = 2
                block_list.append(
                    block_class(
                        in_channels=in_ch,
                        out_channels=num_filters[block_index],
                        stride=temp_stride,
                        if_first=block_index == i == 0,
                        name=conv_name
                    )
                )
                in_ch = block_list[-1].output_channels
            self.out_channels.append(in_ch)
            self.stages.append(nn.Sequential(*block_list))
        if pretrained:
            ckpt_path = "./configs/pretrain/imagenet/resnet{}_vd.pth".format(layers)
            logger = logging.getLogger("networks/backbones/DetResNetvd.py")
            if os.path.exists(ckpt_path):
                logger.info("Load imagenet weights")
                self.load_state_dict(torch.load(ckpt_path))
            else:
                logger.info("{} not exists".format(ckpt_path))

    def forward(self, x):
        x = self.conv1(x)
        x = self.pool1(x)
        out = []
        for stage in self.stages:
            x = stage(x)
            out.append(x)
        return out


if __name__ == "__main__":
    # neckblok = BottleneckBlock(in_channels=3, out_channels=32, stride=2, if_first=True, name="1")
    # baseblok = BasicBlock(in_channels=3, out_channels=32, stride=2, if_first=True, name="2")
    # x = torch.randn(2, 3, 640, 640)
    # y = neckblok(x)
    # print(y.shape)
    # yy = baseblok(x)
    # print(yy.shape)
    # GHost_Resnet50 = ResNet(in_channels=3, layers=50)
    # GHost_Resnet18 = ResNet(in_channels=3, layers=18)
    # yyy = GHost_Resnet50(x)
    # print(yyy[0].shape, yyy[1].shape, yyy[2].shape, yyy[3].shape)
    # yyyy = GHost_Resnet18(x)
    # print(yyyy[0].shape, yyyy[1].shape, yyyy[2].shape, yyyy[3].shape)
    from torchsummaryX import summary
    # from thop import profile
    # from thop import clever_format
    # myNet50 = ResNet(in_channels=3, layers=50).cuda()  # instantiate the network; swap in your own
    # input = torch.randn(1, 3, 640, 640).cuda()
    # macs, params = profile(myNet50, inputs=(input, ))
    # macs, params = clever_format([macs, params], "%.3f")
    # print(macs, params)
    x = torch.randn(2, 3, 640, 640).cuda()                 # requires a CUDA device
    myNet50 = ResNet(in_channels=3, layers=50).cuda()      # instantiate the network; swap in your own
    summary(myNet50, torch.randn(1, 3, 640, 640).cuda())   # print the network structure
    yyy = myNet50(x)
    print(yyy[0].shape, yyy[1].shape, yyy[2].shape, yyy[3].shape)
    # summary(myNet50, (3, 640, 640))  # print the network structure
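As a quick sanity check of the GhostModule above, the following minimal sketch (not part of the original post; the tensor and channel sizes are arbitrary examples) confirms that the output keeps the spatial size, ends up with exactly oup channels, and uses noticeably fewer parameters than a plain convolution with the same output width:

import torch
from torch import nn

# Minimal sanity check, assuming the GhostModule class above is in scope.
ghost = GhostModule(inp=64, oup=128, ratio=4)
plain = nn.Conv2d(64, 128, kernel_size=1, bias=False)

x = torch.randn(1, 64, 40, 40)
print(ghost(x).shape)   # torch.Size([1, 128, 40, 40]): spatial size kept, channels == oup

ghost_params = sum(p.numel() for p in ghost.parameters())
plain_params = sum(p.numel() for p in plain.parameters())
print(ghost_params, plain_params)   # the Ghost module needs noticeably fewer weights than the plain 1x1 conv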

Parameter Count and Compute Test

Network                 Params   Compute
ResNet50                23M      35G
ResNet18                11M      16G
GHost-ResNet50(s=2)     8M       11G
GHost-ResNet50(s=4)     5M       7G
GHost-ResNet18(s=2)     813K     1.25G
GHost-ResNet18(s=4)     516K     827M
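The s in the rows above presumably maps to the ratio argument of GhostModule (the listing above defaults to ratio=4, i.e. the s=4 setting), so switching between s=2 and s=4 only means changing that default. The numbers themselves can be reproduced with thop along the lines of the commented-out snippet in __main__; a minimal sketch, assuming thop is installed and the ResNet class above is in scope:

import torch
from thop import clever_format, profile

# Rough measurement sketch: count multiply-adds and parameters for GHost-ResNet18
# on the same 640x640 input size used in __main__ (runs on CPU here).
model = ResNet(in_channels=3, layers=18, pretrained=False)
dummy = torch.randn(1, 3, 640, 640)
macs, params = profile(model, inputs=(dummy,))
macs, params = clever_format([macs, params], "%.3f")
print(params, macs)   # parameter count and compute, formatted like the table above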

 

Data from the original paper

[Image: results table from the original paper]

Conclusion

The structure definition differs a bit from the original and the numbers do not match exactly, but that does not stop it from compressing the model, and it is still very usable!

 

 
