
ODConv, a plug-and-play accuracy booster (debugged code included)

ODConv (Omni-Dimensional Dynamic Convolution, Li et al., ICLR 2022) extends dynamic convolution by learning attentions along four dimensions of the kernel space: per input channel, per output filter, per spatial position of the kernel, and per candidate kernel. The module below keeps the nn.Conv2d interface, so it can be dropped into an existing network with minimal changes; the full, runnable code follows.

import torch
import torch.nn as nn
import torch.nn.functional as F


class Attention(nn.Module):
    # Computes the four ODConv attentions from a global descriptor of the input:
    # channel-wise, filter-wise, spatial (per kernel position) and kernel-wise.
    def __init__(self, in_planes, out_planes, kernel_size, groups=1, reduction=0.0625, kernel_num=4, min_channel=16):
        super(Attention, self).__init__()
        attention_channel = max(int(in_planes * reduction), min_channel)
        self.kernel_size = kernel_size
        self.kernel_num = kernel_num
        self.temperature = 1.0

        # Squeeze: global average pooling followed by a 1x1 conv acting as an FC layer.
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Conv2d(in_planes, attention_channel, 1, bias=False)
        self.bn = nn.BatchNorm2d(attention_channel)
        self.relu = nn.ReLU(inplace=True)

        # Excitation heads, one per attention dimension.
        self.channel_fc = nn.Conv2d(attention_channel, in_planes, 1, bias=True)
        self.func_channel = self.get_channel_attention

        if in_planes == groups and in_planes == out_planes:  # depth-wise convolution
            self.func_filter = self.skip
        else:
            self.filter_fc = nn.Conv2d(attention_channel, out_planes, 1, bias=True)
            self.func_filter = self.get_filter_attention

        if kernel_size == 1:  # point-wise convolution
            self.func_spatial = self.skip
        else:
            self.spatial_fc = nn.Conv2d(attention_channel, kernel_size * kernel_size, 1, bias=True)
            self.func_spatial = self.get_spatial_attention

        if kernel_num == 1:
            self.func_kernel = self.skip
        else:
            self.kernel_fc = nn.Conv2d(attention_channel, kernel_num, 1, bias=True)
            self.func_kernel = self.get_kernel_attention

        self._initialize_weights()

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def update_temperature(self, temperature):
        self.temperature = temperature

    @staticmethod
    def skip(_):
        # Stands in for an attention head that is not needed; multiplying by 1.0 is a no-op.
        return 1.0

    def get_channel_attention(self, x):
        channel_attention = torch.sigmoid(self.channel_fc(x).view(x.size(0), -1, 1, 1) / self.temperature)
        return channel_attention

    def get_filter_attention(self, x):
        filter_attention = torch.sigmoid(self.filter_fc(x).view(x.size(0), -1, 1, 1) / self.temperature)
        return filter_attention

    def get_spatial_attention(self, x):
        spatial_attention = self.spatial_fc(x).view(x.size(0), 1, 1, 1, self.kernel_size, self.kernel_size)
        spatial_attention = torch.sigmoid(spatial_attention / self.temperature)
        return spatial_attention

    def get_kernel_attention(self, x):
        kernel_attention = self.kernel_fc(x).view(x.size(0), -1, 1, 1, 1, 1)
        kernel_attention = F.softmax(kernel_attention / self.temperature, dim=1)
        return kernel_attention

    def forward(self, x):
        x = self.avgpool(x)
        x = self.fc(x)
        x = self.bn(x)
        x = self.relu(x)
        return self.func_channel(x), self.func_filter(x), self.func_spatial(x), self.func_kernel(x)
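
For orientation, here is a minimal shape check of what the four attention branches return. The example module and tensor sizes are illustrative, not part of the original post; the shapes follow directly from the view calls above.

# Illustrative shape check for the Attention module defined above.
att = Attention(in_planes=8, out_planes=16, kernel_size=3)
x = torch.randn(2, 8, 20, 20)
channel_att, filter_att, spatial_att, kernel_att = att(x)
print(channel_att.shape)  # torch.Size([2, 8, 1, 1])        one weight per input channel
print(filter_att.shape)   # torch.Size([2, 16, 1, 1])       one weight per output filter
print(spatial_att.shape)  # torch.Size([2, 1, 1, 1, 3, 3])  one weight per kernel position
print(kernel_att.shape)   # torch.Size([2, 4, 1, 1, 1, 1])  softmax over kernel_num=4 candidate kernels
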
class ODConv2d(nn.Module):
    # Omni-dimensional dynamic convolution: a drop-in replacement for nn.Conv2d that
    # aggregates kernel_num candidate kernels using the four learned attentions.
    def __init__(self, in_planes, out_planes, kernel_size=3, stride=1, padding=1, dilation=1, groups=1,
                 reduction=0.0625, kernel_num=4):
        super(ODConv2d, self).__init__()
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.kernel_num = kernel_num
        self.attention = Attention(in_planes, out_planes, kernel_size, groups=groups,
                                   reduction=reduction, kernel_num=kernel_num)
        self.weight = nn.Parameter(torch.randn(kernel_num, out_planes, in_planes // groups, kernel_size, kernel_size),
                                   requires_grad=True)
        self._initialize_weights()

        if self.kernel_size == 1 and self.kernel_num == 1:
            self._forward_impl = self._forward_impl_pw1x
        else:
            self._forward_impl = self._forward_impl_common

    def _initialize_weights(self):
        for i in range(self.kernel_num):
            nn.init.kaiming_normal_(self.weight[i], mode='fan_out', nonlinearity='relu')

    def update_temperature(self, temperature):
        self.attention.update_temperature(temperature)

    def _forward_impl_common(self, x):
        # Multiplying the channel (or filter) attention into the weights or into the feature maps
        # is mathematically equivalent; applying it to the feature maps runs faster and uses less
        # GPU memory, so that is what is done here.
        channel_attention, filter_attention, spatial_attention, kernel_attention = self.attention(x)
        batch_size, in_planes, height, width = x.size()
        x = x * channel_attention
        # Fold the batch into the channel dimension so every sample is convolved with its own
        # aggregated kernel in a single grouped convolution.
        x = x.reshape(1, -1, height, width)
        aggregate_weight = spatial_attention * kernel_attention * self.weight.unsqueeze(dim=0)
        aggregate_weight = torch.sum(aggregate_weight, dim=1).view(
            [-1, self.in_planes // self.groups, self.kernel_size, self.kernel_size])
        output = F.conv2d(x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding,
                          dilation=self.dilation, groups=self.groups * batch_size)
        output = output.view(batch_size, self.out_planes, output.size(-2), output.size(-1))
        output = output * filter_attention
        return output

    def _forward_impl_pw1x(self, x):
        # 1x1 kernel with a single candidate: the spatial and kernel attentions degenerate to 1.0,
        # so a plain convolution with channel and filter attention is enough.
        channel_attention, filter_attention, spatial_attention, kernel_attention = self.attention(x)
        x = x * channel_attention
        output = F.conv2d(x, weight=self.weight.squeeze(dim=0), bias=None, stride=self.stride, padding=self.padding,
                          dilation=self.dilation, groups=self.groups)
        output = output * filter_attention
        return output

    def forward(self, x):
        return self._forward_impl(x)
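
Because the module mirrors the nn.Conv2d argument list, swapping it into an existing network is mostly a matter of matching the constructor arguments. The two configurations below (a depth-wise 3x3 and a point-wise 1x1, with hypothetical channel counts) are only a sketch of typical replacements; note that padding defaults to 1, so a 1x1 convolution should pass padding=0 explicitly.

# Sketch of drop-in replacements; channel counts are illustrative.
dw = ODConv2d(32, 32, kernel_size=3, stride=1, padding=1, groups=32)  # depth-wise 3x3
pw = ODConv2d(32, 64, kernel_size=1, stride=1, padding=0)             # point-wise 1x1
x = torch.randn(2, 32, 56, 56)
print(dw(x).shape)  # torch.Size([2, 32, 56, 56])
print(pw(x).shape)  # torch.Size([2, 64, 56, 56])
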
# Quick smoke test: with the default 3x3 kernel, stride 1 and padding 1,
# the spatial size of the input is preserved.
a = torch.ones(3, 8, 20, 20)
b = ODConv2d(8, 8)
c = b(a)
print(c.size())  # torch.Size([3, 8, 20, 20])
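
The update_temperature helper exists so the attention softmax/sigmoid can be annealed during training, as is common for dynamic-convolution models. The loop below is only an illustrative sketch; the schedule (30 down to 1 over the first ten epochs) is an assumption, not something fixed by this code.

# Illustrative temperature-annealing loop; the schedule values are assumptions.
model = nn.Sequential(ODConv2d(8, 16), nn.ReLU(), ODConv2d(16, 8))
for epoch in range(100):
    temperature = max(30.0 - 2.9 * epoch, 1.0)
    for m in model.modules():
        if isinstance(m, ODConv2d):
            m.update_temperature(temperature)
    # ... run the usual training step for this epoch here ...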
