
ODConv Explained


The ODConv code: https://github.com/OSVAI/ODConv/blob/main/modules/odconv.py

ODConv introduces a multi-dimensional attention mechanism that, using a parallel strategy, learns distinct attentions for the convolutional kernel along all four dimensions of the kernel space.
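Concretely, ODConv maintains n candidate kernels W_1, ..., W_n and computes four attentions from the input x: a spatial attention α_si over the k×k kernel positions, an input-channel attention α_ci, an output-channel (filter) attention α_fi, and a kernel attention α_wi over the n candidates. A sketch of the aggregation, in notation loosely following the ODConv paper:

y = \left( \alpha_{w1} \odot \alpha_{f1} \odot \alpha_{c1} \odot \alpha_{s1} \odot W_1 + \cdots + \alpha_{wn} \odot \alpha_{fn} \odot \alpha_{cn} \odot \alpha_{sn} \odot W_n \right) * x

where ⊙ is element-wise multiplication broadcast along the corresponding kernel dimension and * is the convolution. In the code below this sum shows up as aggregate_weight in _forward_impl_common, except that the channel attention is multiplied onto the input feature map and the filter attention onto the output feature map rather than onto the weights, which the code comment notes is equivalent but faster.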

 

Figure: the ODConv architecture when kernel_size (k in the figure) equals 1 and kernel_num (k_num in the figure) equals 1.

To understand the ODConv code, you first need to understand the Attention class.

import torch
import torch.nn as nn
import torch.nn.functional as F


class ODConv2d(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1, groups=1,
                 reduction=0.0625, kernel_num=4):
        super(ODConv2d, self).__init__()
        self.in_planes = in_planes
        self.out_planes = out_planes
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding
        self.dilation = dilation
        self.groups = groups
        self.kernel_num = kernel_num
        self.attention = Attention(in_planes, out_planes, kernel_size, groups=groups,
                                   reduction=reduction, kernel_num=kernel_num)
        # kernel_num candidate kernels, each of shape (out_planes, in_planes // groups, k, k)
        self.weight = nn.Parameter(torch.randn(kernel_num, out_planes, in_planes // groups, kernel_size, kernel_size),
                                   requires_grad=True)
        self._initialize_weights()

        if self.kernel_size == 1 and self.kernel_num == 1:
            self._forward_impl = self._forward_impl_pw1x
        else:
            self._forward_impl = self._forward_impl_common

    def _initialize_weights(self):
        for i in range(self.kernel_num):
            nn.init.kaiming_normal_(self.weight[i], mode='fan_out', nonlinearity='relu')

    def update_temperature(self, temperature):
        self.attention.update_temperature(temperature)

    def _forward_impl_common(self, x):
        # Multiplying channel attention (or filter attention) to weights and feature maps are equivalent,
        # while we observe that when using the latter method the models will run faster with less gpu memory cost.
        channel_attention, filter_attention, spatial_attention, kernel_attention = self.attention(x)
        batch_size, in_planes, height, width = x.size()
        x = x * channel_attention
        x = x.reshape(1, -1, height, width)
        aggregate_weight = spatial_attention * kernel_attention * self.weight.unsqueeze(dim=0)
        aggregate_weight = torch.sum(aggregate_weight, dim=1).view(
            [-1, self.in_planes // self.groups, self.kernel_size, self.kernel_size])
        output = F.conv2d(x, weight=aggregate_weight, bias=None, stride=self.stride, padding=self.padding,
                          dilation=self.dilation, groups=self.groups * batch_size)
        output = output.view(batch_size, self.out_planes, output.size(-2), output.size(-1))
        output = output * filter_attention
        return output

    def _forward_impl_pw1x(self, x):
        channel_attention, filter_attention, spatial_attention, kernel_attention = self.attention(x)
        x = x * channel_attention
        output = F.conv2d(x, weight=self.weight.squeeze(dim=0), bias=None, stride=self.stride, padding=self.padding,
                          dilation=self.dilation, groups=self.groups)
        output = output * filter_attention
        return output

    def forward(self, x):
        return self._forward_impl(x)

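The _forward_impl_common branch applies a different aggregated kernel to every sample in the batch with a single conv2d call by folding the batch dimension into the channel dimension and setting groups to groups * batch_size. The following self-contained sketch (not from the repo; tensor sizes are arbitrary and groups is fixed to 1 for brevity) checks that this batched trick matches a plain per-sample loop:

import torch
import torch.nn.functional as F

batch_size, in_planes, out_planes, k, H, W = 2, 4, 6, 3, 8, 8
x = torch.randn(batch_size, in_planes, H, W)
# one aggregated kernel per sample, as produced by the attention-weighted sum in ODConv
per_sample_weight = torch.randn(batch_size, out_planes, in_planes, k, k)

# batched trick: (B, C, H, W) -> (1, B*C, H, W), then one grouped conv with groups = B
x_folded = x.reshape(1, -1, H, W)
w_folded = per_sample_weight.reshape(-1, in_planes, k, k)
out = F.conv2d(x_folded, w_folded, padding=1, groups=batch_size)
out = out.view(batch_size, out_planes, H, W)

# reference: convolve each sample with its own kernel in a Python loop
ref = torch.cat([F.conv2d(x[i:i + 1], per_sample_weight[i], padding=1)
                 for i in range(batch_size)], dim=0)

print(torch.allclose(out, ref, atol=1e-5))  # True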
It becomes clearer when read together with the code of the Attention class; note that the orange Conv2d layers in the figure have bias=True.

class Attention(nn.Module):
    def __init__(self, in_planes, out_planes, kernel_size, groups=1, reduction=0.0625, kernel_num=4, min_channel=16):
        super(Attention, self).__init__()
        attention_channel = max(int(in_planes * reduction), min_channel)
        self.kernel_size = kernel_size
        self.kernel_num = kernel_num
        self.temperature = 1.0

        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.fc = nn.Conv2d(in_planes, attention_channel, 1, bias=False)
        self.bn = nn.BatchNorm2d(attention_channel)
        self.relu = nn.ReLU(inplace=True)

        self.channel_fc = nn.Conv2d(attention_channel, in_planes, 1, bias=True)
        self.func_channel = self.get_channel_attention

        if in_planes == groups and in_planes == out_planes:  # depth-wise convolution
            self.func_filter = self.skip
        else:
            self.filter_fc = nn.Conv2d(attention_channel, out_planes, 1, bias=True)
            self.func_filter = self.get_filter_attention

        if kernel_size == 1:  # point-wise convolution
            self.func_spatial = self.skip
        else:
            self.spatial_fc = nn.Conv2d(attention_channel, kernel_size * kernel_size, 1, bias=True)
            self.func_spatial = self.get_spatial_attention

        if kernel_num == 1:
            self.func_kernel = self.skip
        else:
            self.kernel_fc = nn.Conv2d(attention_channel, kernel_num, 1, bias=True)
            self.func_kernel = self.get_kernel_attention

        self._initialize_weights()

    def _initialize_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
                if m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            if isinstance(m, nn.BatchNorm2d):
                nn.init.constant_(m.weight, 1)
                nn.init.constant_(m.bias, 0)

    def update_temperature(self, temperature):
        self.temperature = temperature

    @staticmethod
    def skip(_):
        return 1.0

    def get_channel_attention(self, x):
        channel_attention = torch.sigmoid(self.channel_fc(x).view(x.size(0), -1, 1, 1) / self.temperature)
        return channel_attention

    def get_filter_attention(self, x):
        filter_attention = torch.sigmoid(self.filter_fc(x).view(x.size(0), -1, 1, 1) / self.temperature)
        return filter_attention

    def get_spatial_attention(self, x):
        spatial_attention = self.spatial_fc(x).view(x.size(0), 1, 1, 1, self.kernel_size, self.kernel_size)
        spatial_attention = torch.sigmoid(spatial_attention / self.temperature)
        return spatial_attention

    def get_kernel_attention(self, x):
        kernel_attention = self.kernel_fc(x).view(x.size(0), -1, 1, 1, 1, 1)
        kernel_attention = F.softmax(kernel_attention / self.temperature, dim=1)
        return kernel_attention

    def forward(self, x):
        x = self.avgpool(x)
        x = self.fc(x)
        x = self.bn(x)
        x = self.relu(x)
        return self.func_channel(x), self.func_filter(x), self.func_spatial(x), self.func_kernel(x)

When in_planes == groups and in_planes == out_planes (i.e., a depth-wise convolution), filter_attention has no effect (it equals 1); when kernel_size == 1, spatial_attention has no effect (equals 1); and when kernel_num == 1, kernel_attention has no effect (equals 1).
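For reference, a hedged shape check (assuming the linked file has been saved locally as odconv.py so that Attention and ODConv2d can be imported; the module name and all sizes below are only for illustration). It shows the shapes the four attentions broadcast with, and that the skipped ones really are returned as the constant 1.0:

import torch
from odconv import Attention, ODConv2d  # assumption: the repo file saved as odconv.py

x = torch.randn(2, 32, 56, 56)

# general case: all four attentions are tensors
attn = Attention(in_planes=32, out_planes=64, kernel_size=3, kernel_num=4)
c, f, s, k = attn(x)
print(c.shape)  # torch.Size([2, 32, 1, 1])       channel attention
print(f.shape)  # torch.Size([2, 64, 1, 1])       filter attention
print(s.shape)  # torch.Size([2, 1, 1, 1, 3, 3])  spatial attention
print(k.shape)  # torch.Size([2, 4, 1, 1, 1, 1])  kernel attention

# depth-wise + point-wise + single-kernel case: the corresponding branches are skipped
attn_dw = Attention(in_planes=32, out_planes=32, kernel_size=1, groups=32, kernel_num=1)
c, f, s, k = attn_dw(x)
print(f, s, k)  # 1.0 1.0 1.0

# end-to-end output shape of the full ODConv2d layer
conv = ODConv2d(in_planes=32, out_planes=64, kernel_size=3, padding=1, kernel_num=4)
print(conv(x).shape)  # torch.Size([2, 64, 56, 56])

Inside _forward_impl_common, self.weight.unsqueeze(dim=0) has shape (1, kernel_num, out_planes, in_planes // groups, k, k), so the spatial attention broadcasts over the last two dimensions and the kernel attention over dim 1 before the sum over kernel_num.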
