
Transformer Summary: ViT Multi-Head Attention


1: In the paper "Attention Is All You Need", the authors proposed multi-head attention.

Attention formula (d_k is the dimension of K):

    Attention(Q, K, V) = softmax(Q K^T / sqrt(d_k)) V

Multi-head formula:

    MultiHead(Q, K, V) = Concat(head_1, ..., head_h) W^O,  where  head_i = Attention(Q W_i^Q, K W_i^K, V W_i^V)
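To make the formulas concrete, here is a minimal PyTorch sketch of single-head scaled dot-product attention; the tensor shapes (one sequence of 4 tokens, d_k = 8) are illustrative only and not taken from the paper.

    import torch
    import torch.nn.functional as F

    # Toy shapes for illustration: batch 1, 4 tokens, d_k = 8.
    q = torch.randn(1, 4, 8)
    k = torch.randn(1, 4, 8)
    v = torch.randn(1, 4, 8)

    d_k = q.shape[-1]
    scores = q @ k.transpose(-2, -1) / d_k ** 0.5   # Q K^T / sqrt(d_k), shape [1, 4, 4]
    weights = F.softmax(scores, dim=-1)             # each row sums to 1 over the keys
    out = weights @ v                               # weighted sum of V, shape [1, 4, 8]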

ViT applies multi-head attention to the image domain, so let's walk through ViT's code implementation of multi-head attention.

import torch.nn as nn

# NOTE: DropPath and Mlp are assumed to come from the same ViT implementation
# this snippet is taken from; they are not shown here.


class PatchEmbed(nn.Module):
    """Split the image into patches and project each patch to an embedding vector."""

    def __init__(self, img_size=224, patch_size=16, in_c=3, embed_dim=768, norm_layer=None):
        super().__init__()
        img_size = (img_size, img_size)
        patch_size = (patch_size, patch_size)
        self.img_size = img_size
        self.patch_size = patch_size
        self.grid_size = (img_size[0] // patch_size[0], img_size[1] // patch_size[1])
        self.num_patches = self.grid_size[0] * self.grid_size[1]

        # A conv with kernel and stride equal to the patch size is a per-patch linear projection.
        self.proj = nn.Conv2d(in_c, embed_dim, kernel_size=patch_size, stride=patch_size)
        self.norm = norm_layer(embed_dim) if norm_layer else nn.Identity()

    def forward(self, x):
        B, C, H, W = x.shape
        assert H == self.img_size[0] and W == self.img_size[1], \
            f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."

        # flatten: [B, C, H, W] -> [B, C, HW]
        # transpose: [B, C, HW] -> [B, HW, C]
        x = self.proj(x).flatten(2).transpose(1, 2)
        x = self.norm(x)
        return x


class Attention(nn.Module):
    def __init__(self,
                 dim,                  # dimension of the input tokens
                 num_heads=8,
                 qkv_bias=False,
                 qk_scale=None,
                 attn_drop_ratio=0.,
                 proj_drop_ratio=0.):
        super(Attention, self).__init__()
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5
        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop_ratio)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop_ratio)

    def forward(self, x):
        # [batch_size, num_patches + 1, total_embed_dim]
        B, N, C = x.shape  # e.g. (1, 197, 768)

        # qkv(): -> [batch_size, num_patches + 1, 3 * total_embed_dim]
        # reshape: -> [batch_size, num_patches + 1, 3, num_heads, embed_dim_per_head]
        # permute: -> [3, batch_size, num_heads, num_patches + 1, embed_dim_per_head]
        qkv = self.qkv(x).reshape(B, N, 3, self.num_heads, C // self.num_heads).permute(2, 0, 3, 1, 4)
        # each of q, k, v: [batch_size, num_heads, num_patches + 1, embed_dim_per_head]
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        # transpose: -> [batch_size, num_heads, embed_dim_per_head, num_patches + 1]
        # @: multiply -> [batch_size, num_heads, num_patches + 1, num_patches + 1]
        attn = (q @ k.transpose(-2, -1)) * self.scale
        attn = attn.softmax(dim=-1)
        attn = self.attn_drop(attn)

        # @: multiply -> [batch_size, num_heads, num_patches + 1, embed_dim_per_head]
        # transpose: -> [batch_size, num_patches + 1, num_heads, embed_dim_per_head]
        # reshape: -> [batch_size, num_patches + 1, total_embed_dim]
        x = (attn @ v).transpose(1, 2).reshape(B, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)
        return x


class Block(nn.Module):
    def __init__(self,
                 dim,
                 num_heads,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop_ratio=0.,
                 attn_drop_ratio=0.,
                 drop_path_ratio=0.,
                 act_layer=nn.GELU,
                 norm_layer=nn.LayerNorm):
        super(Block, self).__init__()
        self.norm1 = norm_layer(dim)
        self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
                              attn_drop_ratio=attn_drop_ratio, proj_drop_ratio=drop_ratio)
        # NOTE: drop path for stochastic depth, we shall see if this is better than dropout here
        self.drop_path = DropPath(drop_path_ratio) if drop_path_ratio > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop_ratio)

    def forward(self, x):
        x = x + self.drop_path(self.attn(self.norm1(x)))
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x

1: First, the image is split into patches and projected to patch tokens by PatchEmbed.
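A quick shape check, assuming the PatchEmbed class defined above and a dummy 224×224 RGB input (the numbers are the ViT-Base defaults):

    import torch

    patch_embed = PatchEmbed(img_size=224, patch_size=16, in_c=3, embed_dim=768)
    img = torch.randn(1, 3, 224, 224)   # dummy image, batch size 1
    tokens = patch_embed(img)           # 224 / 16 = 14, so 14 * 14 = 196 patches
    print(tokens.shape)                 # torch.Size([1, 196, 768])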

2: Then the attention computation begins: q, k and v are obtained first.
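The same projection and reshape that Attention.forward performs, written out step by step (a sketch with the ViT-Base numbers: N = 197 tokens, C = 768, 8 heads; the bias setting of the linear layer is left at its PyTorch default here purely for illustration):

    import torch
    import torch.nn as nn

    B, N, C, num_heads = 1, 197, 768, 8   # 196 patches + 1 class token
    x = torch.randn(B, N, C)
    qkv_proj = nn.Linear(C, C * 3)        # one linear layer produces q, k and v together

    qkv = qkv_proj(x)                                                           # [1, 197, 2304]
    qkv = qkv.reshape(B, N, 3, num_heads, C // num_heads).permute(2, 0, 3, 1, 4)
    q, k, v = qkv[0], qkv[1], qkv[2]                                            # each: [1, 8, 197, 96]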

3: Next, the attention scores are computed and applied to v.
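Continuing the sketch above, the per-head attention map and the weighted sum of the values:

    scale = (C // num_heads) ** -0.5                    # 1 / sqrt(d_k) with d_k = 96

    attn = (q @ k.transpose(-2, -1)) * scale            # [1, 8, 197, 197] attention scores
    attn = attn.softmax(dim=-1)                         # each row is a distribution over the 197 tokens
    out = (attn @ v).transpose(1, 2).reshape(B, N, C)   # concatenate the heads: back to [1, 197, 768]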

4: The remaining processing then follows: the output projection in Attention, and the residual connections around attention and the MLP in Block.
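A sketch of the two residual branches that Block applies; here nn.Sequential stands in for the Mlp module used in the real implementation, and DropPath is omitted (it is an identity when drop_path_ratio = 0):

    import torch
    import torch.nn as nn

    dim = 768
    norm1, norm2 = nn.LayerNorm(dim), nn.LayerNorm(dim)
    attn = Attention(dim, num_heads=8)      # the Attention class defined above
    mlp = nn.Sequential(                    # stand-in for Mlp, hidden dim = 4 * dim
        nn.Linear(dim, dim * 4), nn.GELU(), nn.Linear(dim * 4, dim))

    x = torch.randn(1, 197, dim)
    x = x + attn(norm1(x))                  # residual branch 1: pre-norm multi-head attention
    x = x + mlp(norm2(x))                   # residual branch 2: pre-norm MLP
    print(x.shape)                          # torch.Size([1, 197, 768])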

5: With that, the whole Transformer encoder is complete.
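Finally, an end-to-end shape check that chains the pieces together (a minimal sketch: only one Attention layer is used, the class token handling is simplified, and position embeddings are omitted compared to the full ViT model):

    import torch
    import torch.nn as nn

    patch_embed = PatchEmbed(img_size=224, patch_size=16, in_c=3, embed_dim=768)
    attn_layer = Attention(dim=768, num_heads=8)    # one multi-head attention layer

    img = torch.randn(1, 3, 224, 224)
    tokens = patch_embed(img)                       # [1, 196, 768]

    cls_token = nn.Parameter(torch.zeros(1, 1, 768))
    tokens = torch.cat([cls_token.expand(tokens.shape[0], -1, -1), tokens], dim=1)  # [1, 197, 768]

    out = attn_layer(tokens)                        # [1, 197, 768], same shape in and out
    print(out.shape)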
