Paper: ViT-V-Net: Vision Transformer for Unsupervised Volumetric Medical Image Registration
Source code: https://github.com/junyuchen245/ViT-V-Net_for_3D_Image_Registration_Pytorch
config.py:
Sets up the initial configuration parameters.
import ml_collections

def get_3DReg_config():
    config = ml_collections.ConfigDict()
    config.patches = ml_collections.ConfigDict({'size': (8, 8, 8)})
    config.patches.grid = (8, 8, 8)
    config.hidden_size = 252
    config.transformer = ml_collections.ConfigDict()
    config.transformer.mlp_dim = 3072
    config.transformer.num_heads = 12
    config.transformer.num_layers = 12
    config.transformer.attention_dropout_rate = 0.0
    config.transformer.dropout_rate = 0.1
    config.patch_size = 8

    config.conv_first_channel = 512
    config.encoder_channels = (16, 32, 32)
    config.down_factor = 2
    config.down_num = 2
    config.decoder_channels = (96, 48, 32, 32, 16)
    config.skip_channels = (32, 32, 32, 32, 16)
    config.n_dims = 3
    config.n_skip = 5
    return config
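A quick sanity check of the patch arithmetic (a sketch; the (160, 192, 224) volume size is an assumption, chosen because it reproduces the 210 patches mentioned under Embeddings below). The CNN encoder halves each spatial dimension down_factor = 2 times before the 8x8x8 patching:

config = get_3DReg_config()
img_size = (160, 192, 224)  # hypothetical input size; the ViTVNet constructor default is (64, 256, 256)
n_patches = 1
for s, p in zip(img_size, config.patches['size']):
    n_patches *= (s // 2**config.down_factor) // p  # CNN downsampling, then patching
print(n_patches)  # 5 * 6 * 7 = 210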
models.py:
Multi-head attention
Conceptually, Attention is the orange part of the figure above.
# Shared imports for the models.py excerpts below.
import copy
import math

import torch
import torch.nn as nn
import torch.nn.functional as nnf
from torch.distributions.normal import Normal
from torch.nn import Conv3d, Dropout, LayerNorm, Linear, Softmax
from torch.nn.modules.utils import _triple

# In the repository, ACT2FN maps activation names to functions; gelu is what Mlp uses.
ACT2FN = {"gelu": nn.functional.gelu, "relu": nn.functional.relu}

class Attention(nn.Module):
    def __init__(self, config, vis):
        super(Attention, self).__init__()
        self.vis = vis  # if True, forward() also returns the attention maps
        self.num_attention_heads = config.transformer["num_heads"]
        self.attention_head_size = int(config.hidden_size / self.num_attention_heads)  # 252 / 12 = 21
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = Linear(config.hidden_size, self.all_head_size)
        self.key = Linear(config.hidden_size, self.all_head_size)
        self.value = Linear(config.hidden_size, self.all_head_size)

        self.out = Linear(config.hidden_size, config.hidden_size)
        self.attn_dropout = Dropout(config.transformer["attention_dropout_rate"])
        self.proj_dropout = Dropout(config.transformer["attention_dropout_rate"])

        self.softmax = Softmax(dim=-1)

    def transpose_for_scores(self, x):
        # (B, n_patch, hidden) -> (B, heads, n_patch, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # scaled dot-product attention
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)
        attention_probs = self.softmax(attention_scores)
        weights = attention_probs if self.vis else None
        attention_probs = self.attn_dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)
        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)
        attention_output = self.out(context_layer)
        attention_output = self.proj_dropout(attention_output)
        return attention_output, weights
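A quick shape check (my sketch, using the config above and random inputs): hidden_size = 252 with 12 heads gives a head size of 21.

config = get_3DReg_config()
attn = Attention(config, vis=True)
tokens = torch.randn(2, 210, 252)  # (B, n_patch, hidden_size)
out, weights = attn(tokens)
print(out.shape)      # torch.Size([2, 210, 252])
print(weights.shape)  # torch.Size([2, 12, 210, 210]) -- one attention map per head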
Mlp is the yellow part of Figure 3: the position-wise feed-forward network.
class Mlp(nn.Module):  # feed-forward network
    def __init__(self, config):
        super(Mlp, self).__init__()
        self.fc1 = Linear(config.hidden_size, config.transformer["mlp_dim"])
        self.fc2 = Linear(config.transformer["mlp_dim"], config.hidden_size)
        self.act_fn = ACT2FN["gelu"]
        self.dropout = Dropout(config.transformer["dropout_rate"])

        self._init_weights()

    def _init_weights(self):
        nn.init.xavier_uniform_(self.fc1.weight)
        nn.init.xavier_uniform_(self.fc2.weight)
        nn.init.normal_(self.fc1.bias, std=1e-6)
        nn.init.normal_(self.fc2.bias, std=1e-6)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act_fn(x)
        x = self.dropout(x)
        x = self.fc2(x)
        x = self.dropout(x)
        return x
Embeddings is the part shown in the figure below. Patching is done with a Conv3d (kernel and stride both equal to the patch size), which here yields 210 patches, i.e. (B, n_patch, hidden) = ([2, 210, 252]); the tensor keeps this shape all the way through the Transformer.
class Embeddings(nn.Module):
    """Construct the embeddings from patch, position embeddings."""
    def __init__(self, config, img_size):
        super(Embeddings, self).__init__()
        self.config = config
        down_factor = config.down_factor
        patch_size = _triple(config.patches["size"])
        n_patches = int(((img_size[0] // 2**down_factor) // patch_size[0]) *
                        ((img_size[1] // 2**down_factor) // patch_size[1]) *
                        ((img_size[2] // 2**down_factor) // patch_size[2]))
        self.hybrid_model = CNNEncoder(config, n_channels=2)
        in_channels = config['encoder_channels'][-1]
        self.patch_embeddings = Conv3d(in_channels=in_channels,
                                       out_channels=config.hidden_size,
                                       kernel_size=patch_size,
                                       stride=patch_size)
        self.position_embeddings = nn.Parameter(torch.zeros(1, n_patches, config.hidden_size))

        self.dropout = Dropout(config.transformer["dropout_rate"])

    def forward(self, x):
        x, features = self.hybrid_model(x)
        x = self.patch_embeddings(x)  # (B, hidden, D', H', W')
        x = x.flatten(2)
        x = x.transpose(-1, -2)  # (B, n_patches, hidden)
        embeddings = x + self.position_embeddings
        embeddings = self.dropout(embeddings)
        return embeddings, features
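To make the 210 concrete, a shape trace (my sketch; assumes the (160, 192, 224) input pair used above, with moving and fixed images stacked on the channel axis):

emb = Embeddings(get_3DReg_config(), img_size=(160, 192, 224))
x = torch.randn(2, 2, 160, 192, 224)  # (B, 2, D, H, W)
# CNNEncoder downsamples twice:           (2, 32, 40, 48, 56)
# Conv3d patching (8^3 kernel/stride):    (2, 252, 5, 6, 7), and 5*6*7 = 210
tokens, features = emb(x)
print(tokens.shape)  # torch.Size([2, 210, 252])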
Block is what sits inside the gray box in Figure 3 (the gray box is not the entire Transformer). The class defines a single block, not 12; the loop over the 12 blocks lives in the Encoder class. Each block is pre-norm residual: x = x + Attn(LN(x)), then x = x + Mlp(LN(x)).
class Block(nn.Module):
    def __init__(self, config, vis):
        super(Block, self).__init__()
        self.hidden_size = config.hidden_size
        self.attention_norm = LayerNorm(config.hidden_size, eps=1e-6)
        self.ffn_norm = LayerNorm(config.hidden_size, eps=1e-6)
        self.ffn = Mlp(config)
        self.attn = Attention(config, vis)

    def forward(self, x):
        # pre-norm residual attention
        h = x
        x = self.attention_norm(x)
        x, weights = self.attn(x)
        x = x + h

        # pre-norm residual feed-forward
        h = x
        x = self.ffn_norm(x)
        x = self.ffn(x)
        x = x + h
        return x, weights
Encoder is simply the 12 Blocks (config.transformer.num_layers) stacked together, with a final LayerNorm.
class Encoder(nn.Module):
    def __init__(self, config, vis):
        super(Encoder, self).__init__()
        self.vis = vis
        self.layer = nn.ModuleList()
        self.encoder_norm = LayerNorm(config.hidden_size, eps=1e-6)
        for _ in range(config.transformer["num_layers"]):
            layer = Block(config, vis)
            self.layer.append(copy.deepcopy(layer))

    def forward(self, hidden_states):
        attn_weights = []
        for layer_block in self.layer:
            hidden_states, weights = layer_block(hidden_states)
            if self.vis:
                attn_weights.append(weights)
        encoded = self.encoder_norm(hidden_states)
        return encoded, attn_weights
DecoderBlock is the green part in Figures 1 and 2, used by DecoderCup: upsample by 2x, concatenate the skip feature, then apply two Conv3dReLU layers.
class DecoderBlock(nn.Module):
    def __init__(self, in_channels, out_channels, skip_channels=0, use_batchnorm=True):
        super().__init__()
        self.conv1 = Conv3dReLU(in_channels + skip_channels, out_channels,
                                kernel_size=3, padding=1, use_batchnorm=use_batchnorm)
        self.conv2 = Conv3dReLU(out_channels, out_channels,
                                kernel_size=3, padding=1, use_batchnorm=use_batchnorm)
        self.up = nn.Upsample(scale_factor=2, mode='trilinear', align_corners=False)

    def forward(self, x, skip=None):
        x = self.up(x)
        if skip is not None:
            x = torch.cat([x, skip], dim=1)
        x = self.conv1(x)
        x = self.conv2(x)
        return x
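A shape sketch for the first decoder stage (my numbers, following config.decoder_channels and the (160, 192, 224) example):

block = DecoderBlock(in_channels=512, out_channels=96, skip_channels=32)
x = torch.randn(2, 512, 5, 6, 7)       # conv_more output on the token volume
skip = torch.randn(2, 32, 10, 12, 14)  # deepest encoder feature at the matching resolution
y = block(x, skip)
print(y.shape)  # torch.Size([2, 96, 10, 12, 14])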
DecoderCup is the entire decoding path.
class DecoderCup(nn.Module):
    def __init__(self, config, img_size):
        super().__init__()
        self.config = config
        self.down_factor = config.down_factor
        head_channels = config.conv_first_channel
        self.img_size = img_size
        self.conv_more = Conv3dReLU(
            config.hidden_size,
            head_channels,
            kernel_size=3,
            padding=1,
            use_batchnorm=True,
        )
        decoder_channels = config.decoder_channels
        in_channels = [head_channels] + list(decoder_channels[:-1])
        out_channels = decoder_channels
        self.patch_size = _triple(config.patches["size"])
        skip_channels = self.config.skip_channels
        blocks = [
            DecoderBlock(in_ch, out_ch, sk_ch) for in_ch, out_ch, sk_ch in zip(in_channels, out_channels, skip_channels)
        ]
        self.blocks = nn.ModuleList(blocks)
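The excerpt cuts off before DecoderCup.forward. A hedged reconstruction of what it has to do (fold the token sequence back into a small volume, widen it with conv_more, then run the decoder blocks against the first n_skip skip features; see the repository for the authoritative version):

    def forward(self, hidden_states, features=None):
        B, n_patch, hidden = hidden_states.size()
        # recover the 3D token grid, e.g. (5, 6, 7) for a (160, 192, 224) input
        l = self.img_size[0] // 2**self.down_factor // self.patch_size[0]
        h = self.img_size[1] // 2**self.down_factor // self.patch_size[1]
        w = self.img_size[2] // 2**self.down_factor // self.patch_size[2]
        x = hidden_states.permute(0, 2, 1).contiguous().view(B, hidden, l, h, w)
        x = self.conv_more(x)  # (B, 512, l, h, w)
        for i, decoder_block in enumerate(self.blocks):
            skip = features[i] if (features is not None and i < self.config.n_skip) else None
            x = decoder_block(x, skip=skip)
        return x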
Transformer assembles all of Figure 3 and is the orange part of Figure 1.
class Transformer(nn.Module):
    def __init__(self, config, img_size, vis):
        super(Transformer, self).__init__()
        self.embeddings = Embeddings(config, img_size=img_size)
        self.encoder = Encoder(config, vis)

    def forward(self, input_ids):
        embedding_output, features = self.embeddings(input_ids)
        encoded, attn_weights = self.encoder(embedding_output)  # (B, n_patch, hidden)
        return encoded, attn_weights, features
SpatialTransformer is identical to the one in VoxelMorph (VM): the blue Spatial Transformer block in Figure 1, which warps the moving image with the predicted flow.
class SpatialTransformer(nn.Module):
    """
    N-D Spatial Transformer
    Obtained from https://github.com/voxelmorph/voxelmorph
    """

    def __init__(self, size, mode='bilinear'):
        super().__init__()

        self.mode = mode

        # create sampling grid
        vectors = [torch.arange(0, s) for s in size]
        grids = torch.meshgrid(vectors)
        grid = torch.stack(grids)
        grid = torch.unsqueeze(grid, 0)
        grid = grid.type(torch.FloatTensor)

        # registering the grid as a buffer cleanly moves it to the GPU, but it also
        # adds it to the state dict. this is annoying since everything in the state dict
        # is included when saving weights to disk, so the model files are way bigger
        # than they need to be. so far, there does not appear to be an elegant solution.
        # see: https://discuss.pytorch.org/t/how-to-register-buffer-without-polluting-state-dict
        self.register_buffer('grid', grid)

    def forward(self, src, flow):
        # new locations
        new_locs = self.grid + flow
        shape = flow.shape[2:]

        # need to normalize grid values to [-1, 1] for resampler
        for i in range(len(shape)):
            new_locs[:, i, ...] = 2 * (new_locs[:, i, ...] / (shape[i] - 1) - 0.5)

        # move channels dim to last position
        # also not sure why, but the channels need to be reversed
        if len(shape) == 2:
            new_locs = new_locs.permute(0, 2, 3, 1)
            new_locs = new_locs[..., [1, 0]]
        elif len(shape) == 3:
            new_locs = new_locs.permute(0, 2, 3, 4, 1)
            new_locs = new_locs[..., [2, 1, 0]]

        return nnf.grid_sample(src, new_locs, align_corners=True, mode=self.mode)
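A usage sketch (assumed sizes): the flow is a per-voxel displacement in voxel units, one channel per axis, so a zero flow gives an identity warp.

stn = SpatialTransformer(size=(160, 192, 224))
moving = torch.randn(1, 1, 160, 192, 224)
flow = torch.zeros(1, 3, 160, 192, 224)  # zero displacement everywhere
warped = stn(moving, flow)
print(torch.allclose(warped, moving, atol=1e-5))  # True: identity warp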
DoubleConv is just two stacked convolutions. Note the code only uses ReLU, not the LeakyReLU from the figure (and despite the [BN] in the docstring, there is no batch norm here either).
class DoubleConv(nn.Module):
    """(convolution => [BN] => ReLU) * 2"""

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        if not mid_channels:
            mid_channels = out_channels
        self.double_conv = nn.Sequential(
            nn.Conv3d(in_channels, mid_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv3d(mid_channels, out_channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True)
        )

    def forward(self, x):
        return self.double_conv(x)
Down puts a max-pooling layer in front of the double convolution. Pooling shrinks the feature maps, which cuts the parameter count and computation in the layers that follow, speeding things up and helping to prevent overfitting.
class Down(nn.Module):
    """Downscaling with maxpool then double conv"""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool3d(2),
            DoubleConv(in_channels, out_channels)
        )

    def forward(self, x):
        return self.maxpool_conv(x)
Conv3dReLU, used in the decoder, is a 3D convolution followed by normalization and a ReLU activation. Question: why does the code normalize with BatchNorm when the figure shows InstanceNorm? And again there is no LeakyReLU.
class Conv3dReLU(nn.Sequential):
    def __init__(
        self,
        in_channels,
        out_channels,
        kernel_size,
        padding=0,
        stride=1,
        use_batchnorm=True,
    ):
        conv = nn.Conv3d(
            in_channels,
            out_channels,
            kernel_size,
            stride=stride,
            padding=padding,
            bias=not (use_batchnorm),
        )
        relu = nn.ReLU(inplace=True)

        bn = nn.BatchNorm3d(out_channels)

        super(Conv3dReLU, self).__init__(conv, bn, relu)
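If one wanted the code to match the figure, a hypothetical drop-in variant (my sketch, not what the repository ships) would swap in InstanceNorm3d and LeakyReLU:

class Conv3dInsLeaky(nn.Sequential):
    """Hypothetical Conv3d => InstanceNorm3d => LeakyReLU, matching the figure."""
    def __init__(self, in_channels, out_channels, kernel_size, padding=0, stride=1):
        conv = nn.Conv3d(in_channels, out_channels, kernel_size,
                         stride=stride, padding=padding, bias=False)
        norm = nn.InstanceNorm3d(out_channels)
        act = nn.LeakyReLU(0.2, inplace=True)  # 0.2 slope is an assumption
        super().__init__(conv, norm, act)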
CNNEncoder is these three convolution stages; it returns the deepest feature map (which goes on to the patch embedding) together with a list of intermediate features for the decoder skips.
Question: why does the figure show only one pooling per stage while the code appends down_num = 2 extra poolings at the end? And the InstanceNorm shown in the figure never appears in the code.
class CNNEncoder(nn.Module):
    def __init__(self, config, n_channels=2):
        super(CNNEncoder, self).__init__()
        self.n_channels = n_channels
        decoder_channels = config.decoder_channels
        encoder_channels = config.encoder_channels
        self.down_num = config.down_num
        self.inc = DoubleConv(n_channels, encoder_channels[0])
        self.down1 = Down(encoder_channels[0], encoder_channels[1])
        self.down2 = Down(encoder_channels[1], encoder_channels[2])
        self.width = encoder_channels[-1]

    def forward(self, x):
        features = []
        x1 = self.inc(x)
        features.append(x1)
        x2 = self.down1(x1)
        features.append(x2)
        feats = self.down2(x2)
        features.append(feats)
        feats_down = feats
        for i in range(self.down_num):
            feats_down = nn.MaxPool3d(2)(feats_down)
            features.append(feats_down)
        return feats, features[::-1]
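A shape trace (my sketch, again with a (160, 192, 224) input) shows what feeds the decoder; note the skip list comes back reversed, deepest first, matching config.skip_channels = (32, 32, 32, 32, 16):

enc = CNNEncoder(get_3DReg_config(), n_channels=2)
feats, skips = enc(torch.randn(2, 2, 160, 192, 224))
print(feats.shape)  # torch.Size([2, 32, 40, 48, 56]) -- continues to the patch embedding
for f in skips:
    print(tuple(f.shape))
# (2, 32, 10, 12, 14), (2, 32, 20, 24, 28), (2, 32, 40, 48, 56),
# (2, 32, 80, 96, 112), (2, 16, 160, 192, 224)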
RegistrationHead maps the decoder output to the flow field; it is used in the main network. Its weights are drawn from N(0, 1e-5) and its bias is zero, so the predicted flow, and hence the warp, starts out near identity.
class RegistrationHead(nn.Sequential):
    def __init__(self, in_channels, out_channels, kernel_size=3, upsampling=1):
        conv3d = nn.Conv3d(in_channels, out_channels, kernel_size=kernel_size, padding=kernel_size // 2)
        # near-zero init so the predicted flow starts close to an identity warp
        conv3d.weight = nn.Parameter(Normal(0, 1e-5).sample(conv3d.weight.shape))
        conv3d.bias = nn.Parameter(torch.zeros(conv3d.bias.shape))
        super().__init__(conv3d)
ViTVNet: the overall network.
class ViTVNet(nn.Module):
    def __init__(self, config, img_size=(64, 256, 256), int_steps=7, vis=False, mode='bilinear'):
        super(ViTVNet, self).__init__()
        self.transformer = Transformer(config, img_size, vis)
        self.decoder = DecoderCup(config, img_size)
        self.reg_head = RegistrationHead(
            in_channels=config.decoder_channels[-1],
            out_channels=config['n_dims'],
            kernel_size=3,
        )
        self.spatial_trans = SpatialTransformer(img_size, mode)
        self.config = config
        #self.integrate = VecInt(img_size, int_steps)  # optional diffeomorphic integration (disabled)

    def forward(self, x):
        source = x[:, 0:1, :, :]  # channel 0 is the moving image
        x, attn_weights, features = self.transformer(x)  # (B, n_patch, hidden)
        x = self.decoder(x, features)
        flow = self.reg_head(x)
        #flow = self.integrate(x)  # disabled along with VecInt above
        out = self.spatial_trans(source, flow)
        return out, flow
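An end-to-end sketch (assumes the (160, 192, 224) volumes used throughout, plus the reconstructed DecoderCup.forward above; note the constructor default is (64, 256, 256)):

config = get_3DReg_config()
model = ViTVNet(config, img_size=(160, 192, 224))
moving = torch.randn(1, 1, 160, 192, 224)
fixed = torch.randn(1, 1, 160, 192, 224)
x = torch.cat([moving, fixed], dim=1)  # (B, 2, D, H, W); channel 0 must be the moving image
warped, flow = model(x)
print(warped.shape)  # torch.Size([1, 1, 160, 192, 224])
print(flow.shape)    # torch.Size([1, 3, 160, 192, 224])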