
A hands-on guide to attention mechanisms in YOLO: where should the attention module be added?

Steps for adding an attention mechanism to YOLOv5:

1. Add the attention module class(es) to common.py
2. Add the matching condition in yolo.py
3. Create your own attention yaml file

Step 1:

I. common.py

First, add the attention modules you want to use to common.py:

```python
# --- attention modules added to models/common.py ---
import torch
import torch.nn as nn  # both are already imported at the top of common.py


class h_sigmoid(nn.Module):
    def __init__(self, inplace=True):
        super(h_sigmoid, self).__init__()
        self.relu = nn.ReLU6(inplace=inplace)

    def forward(self, x):
        return self.relu(x + 3) / 6


class h_swish(nn.Module):
    def __init__(self, inplace=True):
        super(h_swish, self).__init__()
        self.sigmoid = h_sigmoid(inplace=inplace)

    def forward(self, x):
        return x * self.sigmoid(x)


class CoordAtt(nn.Module):
    # Coordinate Attention: attention is computed separately along the height
    # and width directions, so positional information is preserved
    def __init__(self, inp, oup, reduction=32):
        super(CoordAtt, self).__init__()
        self.pool_h = nn.AdaptiveAvgPool2d((None, 1))
        self.pool_w = nn.AdaptiveAvgPool2d((1, None))
        mip = max(8, inp // reduction)
        self.conv1 = nn.Conv2d(inp, mip, kernel_size=1, stride=1, padding=0)
        self.bn1 = nn.BatchNorm2d(mip)
        self.act = h_swish()
        self.conv_h = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)
        self.conv_w = nn.Conv2d(mip, oup, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        identity = x
        n, c, h, w = x.size()
        x_h = self.pool_h(x)
        x_w = self.pool_w(x).permute(0, 1, 3, 2)
        y = torch.cat([x_h, x_w], dim=2)
        y = self.conv1(y)
        y = self.bn1(y)
        y = self.act(y)
        x_h, x_w = torch.split(y, [h, w], dim=2)
        x_w = x_w.permute(0, 1, 3, 2)
        a_h = self.conv_h(x_h).sigmoid()
        a_w = self.conv_w(x_w).sigmoid()
        out = identity * a_w * a_h
        return out


class SELayer(nn.Module):
    # Squeeze-and-Excitation: channel attention from global average pooling
    # followed by a two-layer bottleneck MLP
    def __init__(self, c1, r=16):
        super(SELayer, self).__init__()
        self.avgpool = nn.AdaptiveAvgPool2d(1)
        self.l1 = nn.Linear(c1, c1 // r, bias=False)
        self.relu = nn.ReLU(inplace=True)
        self.l2 = nn.Linear(c1 // r, c1, bias=False)
        self.sig = nn.Sigmoid()

    def forward(self, x):
        b, c, _, _ = x.size()
        y = self.avgpool(x).view(b, c)
        y = self.l1(y)
        y = self.relu(y)
        y = self.l2(y)
        y = self.sig(y)
        y = y.view(b, c, 1, 1)
        return x * y.expand_as(x)


class eca_layer(nn.Module):
    """Constructs an ECA module.

    Args:
        channel: Number of channels of the input feature map
        k_size: Adaptive selection of kernel size
    """
    def __init__(self, channel, k_size=3):
        super(eca_layer, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.conv = nn.Conv1d(1, 1, kernel_size=k_size, padding=(k_size - 1) // 2, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        # feature descriptor on the global spatial information
        y = self.avg_pool(x)
        # 1D convolution across channels captures local cross-channel interaction
        y = self.conv(y.squeeze(-1).transpose(-1, -2)).transpose(-1, -2).unsqueeze(-1)
        # channel attention weights
        y = self.sigmoid(y)
        # apply the weights once (the commonly copied snippet multiplies by y twice, which is a bug)
        return x * y.expand_as(x)


class ChannelAttention(nn.Module):
    def __init__(self, in_planes, ratio=16):
        super(ChannelAttention, self).__init__()
        self.avg_pool = nn.AdaptiveAvgPool2d(1)
        self.max_pool = nn.AdaptiveMaxPool2d(1)
        self.f1 = nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False)
        self.relu = nn.ReLU()
        self.f2 = nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False)
        # Alternative: the same shared MLP written as a sequential container
        # self.sharedMLP = nn.Sequential(
        #     nn.Conv2d(in_planes, in_planes // ratio, 1, bias=False), nn.ReLU(),
        #     nn.Conv2d(in_planes // ratio, in_planes, 1, bias=False))
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = self.f2(self.relu(self.f1(self.avg_pool(x))))
        max_out = self.f2(self.relu(self.f1(self.max_pool(x))))
        out = self.sigmoid(avg_out + max_out)
        return out


class SpatialAttention(nn.Module):
    def __init__(self, kernel_size=7):
        super(SpatialAttention, self).__init__()
        assert kernel_size in (3, 7), 'kernel size must be 3 or 7'
        padding = 3 if kernel_size == 7 else 1
        self.conv = nn.Conv2d(2, 1, kernel_size, padding=padding, bias=False)
        self.sigmoid = nn.Sigmoid()

    def forward(self, x):
        avg_out = torch.mean(x, dim=1, keepdim=True)
        max_out, _ = torch.max(x, dim=1, keepdim=True)
        x = torch.cat([avg_out, max_out], dim=1)
        x = self.conv(x)
        return self.sigmoid(x)


class CBAMC3(nn.Module):
    # CSP Bottleneck with 3 convolutions plus CBAM (channel + spatial attention)
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super(CBAMC3, self).__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
        self.channel_attention = ChannelAttention(c2, 16)
        self.spatial_attention = SpatialAttention(7)
        # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])

    def forward(self, x):
        out = self.channel_attention(x) * x
        # print('outchannels:{}'.format(out.shape))  # debug print; keep commented out for training
        out = self.spatial_attention(out) * out
        return out
```
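As a quick sanity check before touching yolo.py, a minimal smoke test like the one below (my own sketch, assuming the classes above were added to models/common.py in a standard YOLOv5 checkout and that the script is run from the repo root) feeds each module a dummy feature map and verifies that the output shape matches the input shape, which is what an attention layer dropped into the model graph must preserve:

```python
import torch

# assumes the attention classes were added to models/common.py
from models.common import CoordAtt, SELayer, eca_layer, CBAMC3

x = torch.randn(1, 64, 32, 32)  # dummy feature map: batch 1, 64 channels, 32x32

for m in (CoordAtt(64, 64), SELayer(64), eca_layer(64), CBAMC3(64, 64)):
    y = m(x)
    assert y.shape == x.shape, f'{m.__class__.__name__} changed the tensor shape'
    print(f'{m.__class__.__name__}: ok, output {tuple(y.shape)}')
```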

II. yolo.py

Inside the def parse_model(d, ch): function, add the name of the attention module you want to use to the module list in the code below.

Original:

```python
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
         BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]:
    c1, c2 = ch[f], args[0]
    if c2 != no:  # if not output
        c2 = make_divisible(c2 * gw, 8)
    args = [c1, c2, *args[1:]]
    if m in [BottleneckCSP, C3, C3TR, C3Ghost]:
        args.insert(2, n)  # number of repeats
        n = 1
```

After adding CoordAtt:

```python
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
         BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, CoordAtt]:
    c1, c2 = ch[f], args[0]
    if c2 != no:  # if not output
        c2 = make_divisible(c2 * gw, 8)
    args = [c1, c2, *args[1:]]
    if m in [BottleneckCSP, C3, C3TR, C3Ghost]:
        args.insert(2, n)  # number of repeats
        n = 1
```
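The change above only registers CoordAtt, whose constructor, like Conv, takes the input and output channel counts as its first two arguments. If you also want to register the other modules from step 1, one possible sketch (mine, untested, and dependent on your yolo.py version) is shown below: CBAMC3 goes into both lists because, as a C3 variant, it also accepts the repeat count n, while SELayer and eca_layer get their own branch (placed alongside the existing elif branches in parse_model) because they only need the input channel count and leave the channel count unchanged.

```python
if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
         BottleneckCSP, C3, C3TR, C3SPP, C3Ghost, CoordAtt, CBAMC3]:
    c1, c2 = ch[f], args[0]
    if c2 != no:  # if not output
        c2 = make_divisible(c2 * gw, 8)
    args = [c1, c2, *args[1:]]
    if m in [BottleneckCSP, C3, C3TR, C3Ghost, CBAMC3]:
        args.insert(2, n)  # number of repeats
        n = 1
elif m in [SELayer, eca_layer]:
    # these layers keep the channel count unchanged and only need the input
    # channel count (plus any optional yaml args such as r or k_size)
    c2 = ch[f]
    args = [c2, *args]
```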

III. The last step: create your custom yaml file

Here I use yolov5s.yaml as the template. The position of the attention module is not fixed; you have to experiment on your own dataset. A placement that gains a few points for someone else may do nothing for you, which is why people half-jokingly call it "black magic".
An example CA.yaml is sketched below.

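The following is a minimal sketch of such a CA.yaml, assuming yolov5s.yaml (v6.x layout) is kept unchanged except for one CoordAtt layer appended after SPPF at the end of the backbone; the position and the [1024] channel argument are illustrative choices, not a fixed recipe. Because the insertion shifts every later layer index by one, the from indices on the head side (the two head Concat layers and Detect) are renumbered accordingly.

```yaml
# CA.yaml -- illustrative sketch based on yolov5s.yaml (YOLOv5 v6.x layout)
nc: 80  # number of classes
depth_multiple: 0.33  # model depth multiple
width_multiple: 0.50  # layer channel multiple
anchors:
  - [10,13, 16,30, 33,23]  # P3/8
  - [30,61, 62,45, 59,119]  # P4/16
  - [116,90, 156,198, 373,326]  # P5/32

# backbone: identical to yolov5s.yaml except for the CoordAtt layer at index 10
backbone:
  # [from, number, module, args]
  [[-1, 1, Conv, [64, 6, 2, 2]],   # 0-P1/2
   [-1, 1, Conv, [128, 3, 2]],     # 1-P2/4
   [-1, 3, C3, [128]],
   [-1, 1, Conv, [256, 3, 2]],     # 3-P3/8
   [-1, 6, C3, [256]],
   [-1, 1, Conv, [512, 3, 2]],     # 5-P4/16
   [-1, 9, C3, [512]],
   [-1, 1, Conv, [1024, 3, 2]],    # 7-P5/32
   [-1, 3, C3, [1024]],
   [-1, 1, SPPF, [1024, 5]],       # 9
   [-1, 1, CoordAtt, [1024]],      # 10  <- added attention layer
  ]

# head: same as yolov5s.yaml, but all indices after the insertion shift by +1
head:
  [[-1, 1, Conv, [512, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 6], 1, Concat, [1]],      # cat backbone P4
   [-1, 3, C3, [512, False]],      # 14

   [-1, 1, Conv, [256, 1, 1]],
   [-1, 1, nn.Upsample, [None, 2, 'nearest']],
   [[-1, 4], 1, Concat, [1]],      # cat backbone P3
   [-1, 3, C3, [256, False]],      # 18 (P3/8-small)

   [-1, 1, Conv, [256, 3, 2]],
   [[-1, 15], 1, Concat, [1]],     # cat head P4
   [-1, 3, C3, [512, False]],      # 21 (P4/16-medium)

   [-1, 1, Conv, [512, 3, 2]],
   [[-1, 11], 1, Concat, [1]],     # cat head P5
   [-1, 3, C3, [1024, False]],     # 24 (P5/32-large)

   [[18, 21, 24], 1, Detect, [nc, anchors]],  # Detect(P3, P4, P5)
  ]
```

Training can then point at the new config, e.g. python train.py --cfg models/CA.yaml --data your_data.yaml, and the result compared against the unmodified yolov5s baseline to see whether this particular placement actually helps on your data.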