
Vision Transformer Code


ViT Overall Architecture

(Figure: overall ViT architecture)

Transformer Encoder

(Figure: Transformer encoder block)

MLP block

(Figure: MLP block)

Code

#%%
import torch
from torch import nn
from einops import rearrange,repeat
from einops.layers.torch import Rearrange
#%%
def pair(t):
    return t if isinstance(t,tuple) else (t,t)
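# e.g. pair(224) -> (224,224), pair((224,160)) -> (224,160):
# lets image_size and patch_size be given as an int or a (height,width) tuple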
#%%
class PreNorm(nn.Module):
    def __init__(self,dim,fn):
        super().__init__()
        self.norm = nn.LayerNorm(dim)
        self.fn = fn
    def forward(self,x,**kwargs):
        return self.fn(self.norm(x),**kwargs)
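# Note: this is the pre-norm variant: LayerNorm is applied *before* the wrapped
# sublayer, and the residual add happens later in Transformer.forward. The
# original Transformer's "Add & Norm" instead normalizes after the residual.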
#%%
class FeedForward(nn.Module):
    def __init__(self,dim,hidden_dim,dropout=0.):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim,hidden_dim),
            nn.GELU(),
            nn.Dropout(dropout),
            nn.Linear(hidden_dim,dim),
            nn.Dropout(dropout)
        )
    def forward(self,x):
        return self.net(x)
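# The MLP expands dim -> hidden_dim with a GELU in between, then projects
# back down to dim, so the token shape is preserved.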
#%%
class Attention(nn.Module):
    def __init__(self,dim,heads=8,dim_head=64,dropout=0.):
        super().__init__()
        inner_dim = dim_head * heads
        project_out = not (heads==1 and dim_head==dim)

        self.heads = heads
        self.scale = dim_head ** -0.5

        self.attend = nn.Softmax(dim=-1)
        self.to_qkv = nn.Linear(dim,inner_dim * 3,bias = False)

        self.to_out = nn.Sequential(
            nn.Linear(inner_dim,dim),
            nn.Dropout(dropout),
        ) if project_out else nn.Identity()

    def forward(self,x):
        qkv = self.to_qkv(x).chunk(3,dim=-1)
        q,k,v = map(lambda t:rearrange(t,'b n (h d) -> b h n d',h = self.heads),qkv)

        dots = torch.matmul(q,k.transpose(-1,-2)) * self.scale

        attn = self.attend(dots)

        out = torch.matmul(attn,v)
        out = rearrange(out,'b h n d -> b n (h d)')
        return self.to_out(out)
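#%%
# Added sanity check (not in the original post): Attention preserves the
# (batch, tokens, dim) shape. With dim=1024, heads=16, dim_head=64,
# inner_dim is 16*64 = 1024, so to_qkv maps 1024 -> 3072.
_attn = Attention(dim=1024,heads=16,dim_head=64)
assert _attn(torch.randn(1,197,1024)).shape == (1,197,1024)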
#%%
class Transformer(nn.Module):
    def __init__(self,dim,depth,heads,dim_head,mlp_dim,dropout=0.):
        super().__init__()
        self.layers = nn.ModuleList([])
        # stack depth encoder blocks on top of each other
        for _ in range(depth):
            self.layers.append(nn.ModuleList([
                # multi-head self-attention + Add & Norm (here: pre-norm)
                PreNorm(dim,Attention(dim,heads=heads,dim_head=dim_head,dropout=dropout)),
                # feed-forward network + Add & Norm (here: pre-norm)
                PreNorm(dim,FeedForward(dim,mlp_dim,dropout=dropout))
            ]))
    def forward(self,x):
        for attn,ff in self.layers:
            # run multi-head attention and add the residual connection
            x = attn(x) + x
            # run the feed-forward network and add the residual connection
            x = ff(x) + x
        return x

#%%
class VIT(nn.Module):
    def __init__(self,*,image_size,patch_size,num_classes,dim,depth,heads,mlp_dim,pool='cls',channels=3,dim_head=64,emb_dropout=0.,dropout=0.):
        super().__init__()
        image_height,image_width = pair(image_size) #224x224
        patch_height,patch_width = pair(patch_size) #16x16

        # the image height and width must each be divisible by the patch height and width
        assert image_height % patch_height == 0 and image_width % patch_width == 0,'Image dimensions must be divisible by the patch size'
        num_patches = (image_height//patch_height) * (image_width//patch_width)
        patch_dim = channels * patch_height * patch_width # dimension of one flattened patch
        assert pool in {'cls','mean'},"pool must be either 'cls' (class token) or 'mean' (mean pooling)"

        '''Module 1: turn each patch into an embedding'''
        self.to_patch_embedding = nn.Sequential(
            # (b, c, H, W) -> (b, num_patches, patch_dim):
            # cut the image into patch_height x patch_width patches and flatten each one
            Rearrange('b c (h p1) (w p2) -> b (h w) (p1 p2 c)',p1=patch_height,p2=patch_width),
            # project each flattened patch from patch_dim to dim, the embedding size the encoder expects
            nn.Linear(patch_dim,dim),
        )
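        # for a 224x224 RGB image with 16x16 patches this is
        # (b, 3, 224, 224) -> (b, 196, 768) -> (b, 196, dim)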

        '''Module 2: positional embeddings'''
        # dim is the embedding size the encoder expects; num_patches+1 reserves a position for the cls token as well
        # nn.Parameter marks a tensor as a learnable model parameter, updated during training to minimize the loss
        # torch.randn(1,2,3) draws a tensor of shape (1,2,3) from a standard normal distribution
        self.pos_embedding = nn.Parameter(torch.randn(1,num_patches+1,dim))
        self.cls_token = nn.Parameter(torch.randn(1,1,dim))
        self.dropout = nn.Dropout(emb_dropout)

        '''Module 3: the Transformer encoder'''
        self.transformer = Transformer(dim,depth,heads,dim_head,mlp_dim,dropout)
        # pooling strategy: 'cls' token or mean over all tokens
        self.pool = pool
        # identity: does nothing, kept as a hook point
        self.to_latent = nn.Identity()

        '''Module 4: the MLP classification head'''
        self.mlp_head = nn.Sequential(
            nn.LayerNorm(dim),
            nn.Linear(dim,num_classes)
        )

    def forward(self,img):
        x = self.to_patch_embedding(img)
        b,n,_ = x.shape

        cls_tokens = repeat(self.cls_token,'() n d -> b n d',b=b)
        x = torch.cat((cls_tokens,x),dim=1)
        x += self.pos_embedding[:,:(n+1)]
        x = self.dropout(x)

        x = self.transformer(x)

        x = x.mean(dim=1) if self.pool == 'mean' else x[:,0]

        x = self.to_latent(x)
        return self.mlp_head(x)
#%%
v = VIT(
    image_size = 224,
    patch_size = 16,
    num_classes = 1000, # number of classes for the final linear layer applied to the cls token
    dim = 1024,
    depth = 6, # number of stacked encoder blocks
    heads = 16, # number of attention heads
    mlp_dim = 2048,
    dropout = 0.1,
    emb_dropout = 0.1
)
#%%
img = torch.randn(1,3,224,224)
preds = v(img)
preds.shape
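
Running the last cell feeds one random 224x224 RGB image through the model, so preds.shape comes out as torch.Size([1, 1000]): one row of logits over the 1000 classes. To see the token sequence the encoder actually receives, here is a small added check (a sketch, not part of the original code):

#%%
tokens = v.to_patch_embedding(img)
tokens.shape # torch.Size([1, 196, 1024]): (224/16)^2 = 196 patches, each embedded in dim=1024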

Further Reading

Vision Transformer (ViT) PyTorch code, fully explained (with diagrams):
https://blog.csdn.net/weixin_44966641/article/details/118733341
