Paper: 2307.08388.pdf (arxiv.org)
Dynamic Snake Convolution takes its design inspiration from the shape of a snake and is meant to improve sensitivity to object shapes and boundaries. It helps a neural network capture shape information more effectively, especially for complex or irregularly shaped targets. This is achieved by introducing a dynamic, deformable convolution kernel: the kernel adjusts itself according to the shape and boundary information of the target, so it can better fit the target's specific geometry.
Standard convolution can be limited when the target's shape varies widely, whereas dynamic snake convolution adaptively adjusts the shape and extent of its kernel and therefore captures the target's features more effectively.
Adding this module typically improves a detector's perception of targets at different scales, shapes, and poses, which in turn improves detection accuracy and robustness. It is only one building block for object detection, but it is representative of the ongoing effort in deep learning to improve a model's understanding of complex scenes.
import torch
import torch.nn as nn
from ..modules.conv import Conv

__all__ = ['DySnakeConv']


class DySnakeConv(nn.Module):
    def __init__(self, inc, ouc, k=3) -> None:
        super().__init__()

        self.conv_0 = Conv(inc, ouc, k)        # standard convolution
        self.conv_x = DSConv(inc, ouc, 0, k)   # snake convolution along the x-axis
        self.conv_y = DSConv(inc, ouc, 1, k)   # snake convolution along the y-axis

    def forward(self, x):
        # concatenating the three branches gives 3 * ouc output channels
        return torch.cat([self.conv_0(x), self.conv_x(x), self.conv_y(x)], dim=1)
class DSConv(nn.Module):
    def __init__(self, in_ch, out_ch, morph, kernel_size=3, if_offset=True, extend_scope=1):
        """
        The Dynamic Snake Convolution
        :param in_ch: input channels
        :param out_ch: output channels
        :param kernel_size: the size of the kernel
        :param extend_scope: the range to expand (default 1 for this method)
        :param morph: the morphology of the convolution kernel, mainly divided into two types:
                      along the x-axis (0) and the y-axis (1) (see the paper for details)
        :param if_offset: whether deformation is required; if False, this is a standard convolution kernel
        """
        super(DSConv, self).__init__()
        # use the <offset_conv> to learn the deformable offset
        self.offset_conv = nn.Conv2d(in_ch, 2 * kernel_size, 3, padding=1)
        self.bn = nn.BatchNorm2d(2 * kernel_size)
        self.kernel_size = kernel_size

        # two types of the DSConv (along x-axis and y-axis)
        self.dsc_conv_x = nn.Conv2d(
            in_ch,
            out_ch,
            kernel_size=(kernel_size, 1),
            stride=(kernel_size, 1),
            padding=0,
        )
        self.dsc_conv_y = nn.Conv2d(
            in_ch,
            out_ch,
            kernel_size=(1, kernel_size),
            stride=(1, kernel_size),
            padding=0,
        )

        self.gn = nn.GroupNorm(out_ch // 4, out_ch)
        self.act = Conv.default_act

        self.extend_scope = extend_scope
        self.morph = morph
        self.if_offset = if_offset

    def forward(self, f):
        offset = self.offset_conv(f)
        offset = self.bn(offset)
        # we need a range of deformation between -1 and 1 to mimic the snake's swing
        offset = torch.tanh(offset)
        input_shape = f.shape
        dsc = DSC(input_shape, self.kernel_size, self.extend_scope, self.morph)
        deformed_feature = dsc.deform_conv(f, offset, self.if_offset)
        if self.morph == 0:
            x = self.dsc_conv_x(deformed_feature.type(f.dtype))
            x = self.gn(x)
            x = self.act(x)
            return x
        else:
            x = self.dsc_conv_y(deformed_feature.type(f.dtype))
            x = self.gn(x)
            x = self.act(x)
            return x
# Core code. For ease of understanding, the dimensions of inputs and outputs are marked next to the code.
class DSC(object):
    def __init__(self, input_shape, kernel_size, extend_scope, morph):
        self.num_points = kernel_size
        self.width = input_shape[2]
        self.height = input_shape[3]
        self.morph = morph
        self.extend_scope = extend_scope  # offset (-1 ~ 1) * extend_scope

        # define feature map shape
        """
        B: Batch size  C: Channel  W: Width  H: Height
        """
        self.num_batch = input_shape[0]
        self.num_channels = input_shape[1]

    """
    input: offset [B, 2*K, W, H]  K: kernel size (2*K: 2D image, deformation contains <x_offset> and <y_offset>)
    output_x: [B, 1, W, K*H] coordinate map
    output_y: [B, 1, K*W, H] coordinate map
    """
    def _coordinate_map_3D(self, offset, if_offset):
        device = offset.device
        # offset
        y_offset, x_offset = torch.split(offset, self.num_points, dim=1)

        y_center = torch.arange(0, self.width).repeat([self.height])
        y_center = y_center.reshape(self.height, self.width)
        y_center = y_center.permute(1, 0)
        y_center = y_center.reshape([-1, self.width, self.height])
        y_center = y_center.repeat([self.num_points, 1, 1]).float()
        y_center = y_center.unsqueeze(0)

        x_center = torch.arange(0, self.height).repeat([self.width])
        x_center = x_center.reshape(self.width, self.height)
        x_center = x_center.permute(0, 1)
        x_center = x_center.reshape([-1, self.width, self.height])
        x_center = x_center.repeat([self.num_points, 1, 1]).float()
        x_center = x_center.unsqueeze(0)

        if self.morph == 0:
            """
            Initialize the kernel and flatten the kernel
            y: only need 0
            x: -num_points//2 ~ num_points//2 (determined by the kernel size)
            """
            y = torch.linspace(0, 0, 1)
            x = torch.linspace(
                -int(self.num_points // 2),
                int(self.num_points // 2),
                int(self.num_points),
            )

            y, x = torch.meshgrid(y, x)
            y_spread = y.reshape(-1, 1)
            x_spread = x.reshape(-1, 1)

            y_grid = y_spread.repeat([1, self.width * self.height])
            y_grid = y_grid.reshape([self.num_points, self.width, self.height])
            y_grid = y_grid.unsqueeze(0)  # [B*K*K, W, H]

            x_grid = x_spread.repeat([1, self.width * self.height])
            x_grid = x_grid.reshape([self.num_points, self.width, self.height])
            x_grid = x_grid.unsqueeze(0)  # [B*K*K, W, H]

            y_new = y_center + y_grid
            x_new = x_center + x_grid

            y_new = y_new.repeat(self.num_batch, 1, 1, 1).to(device)
            x_new = x_new.repeat(self.num_batch, 1, 1, 1).to(device)

            y_offset_new = y_offset.detach().clone()

            if if_offset:
                y_offset = y_offset.permute(1, 0, 2, 3)
                y_offset_new = y_offset_new.permute(1, 0, 2, 3)
                center = int(self.num_points // 2)

                # The center position remains unchanged and the rest of the positions begin to swing.
                # The main idea is that "offset is an iterative process".
                y_offset_new[center] = 0
                for index in range(1, center):
                    y_offset_new[center + index] = (y_offset_new[center + index - 1] + y_offset[center + index])
                    y_offset_new[center - index] = (y_offset_new[center - index + 1] + y_offset[center - index])
                y_offset_new = y_offset_new.permute(1, 0, 2, 3).to(device)
                y_new = y_new.add(y_offset_new.mul(self.extend_scope))

            y_new = y_new.reshape(
                [self.num_batch, self.num_points, 1, self.width, self.height])
            y_new = y_new.permute(0, 3, 1, 4, 2)
            y_new = y_new.reshape([
                self.num_batch, self.num_points * self.width, 1 * self.height
            ])
            x_new = x_new.reshape(
                [self.num_batch, self.num_points, 1, self.width, self.height])
            x_new = x_new.permute(0, 3, 1, 4, 2)
            x_new = x_new.reshape([
                self.num_batch, self.num_points * self.width, 1 * self.height
            ])
            return y_new, x_new

        else:
            """
            Initialize the kernel and flatten the kernel
            y: -num_points//2 ~ num_points//2 (determined by the kernel size)
            x: only need 0
            """
            y = torch.linspace(
                -int(self.num_points // 2),
                int(self.num_points // 2),
                int(self.num_points),
            )
            x = torch.linspace(0, 0, 1)

            y, x = torch.meshgrid(y, x)
            y_spread = y.reshape(-1, 1)
            x_spread = x.reshape(-1, 1)

            y_grid = y_spread.repeat([1, self.width * self.height])
            y_grid = y_grid.reshape([self.num_points, self.width, self.height])
            y_grid = y_grid.unsqueeze(0)

            x_grid = x_spread.repeat([1, self.width * self.height])
            x_grid = x_grid.reshape([self.num_points, self.width, self.height])
            x_grid = x_grid.unsqueeze(0)

            y_new = y_center + y_grid
            x_new = x_center + x_grid

            y_new = y_new.repeat(self.num_batch, 1, 1, 1)
            x_new = x_new.repeat(self.num_batch, 1, 1, 1)

            y_new = y_new.to(device)
            x_new = x_new.to(device)
            x_offset_new = x_offset.detach().clone()

            if if_offset:
                x_offset = x_offset.permute(1, 0, 2, 3)
                x_offset_new = x_offset_new.permute(1, 0, 2, 3)
                center = int(self.num_points // 2)
                x_offset_new[center] = 0
                for index in range(1, center):
                    x_offset_new[center + index] = (x_offset_new[center + index - 1] + x_offset[center + index])
                    x_offset_new[center - index] = (x_offset_new[center - index + 1] + x_offset[center - index])
                x_offset_new = x_offset_new.permute(1, 0, 2, 3).to(device)
                x_new = x_new.add(x_offset_new.mul(self.extend_scope))

            y_new = y_new.reshape(
                [self.num_batch, 1, self.num_points, self.width, self.height])
            y_new = y_new.permute(0, 3, 1, 4, 2)
            y_new = y_new.reshape([
                self.num_batch, 1 * self.width, self.num_points * self.height
            ])
            x_new = x_new.reshape(
                [self.num_batch, 1, self.num_points, self.width, self.height])
            x_new = x_new.permute(0, 3, 1, 4, 2)
            x_new = x_new.reshape([
                self.num_batch, 1 * self.width, self.num_points * self.height
            ])
            return y_new, x_new
- """
- input: input feature map [N,C,D,W,H];coordinate map [N,K*D,K*W,K*H]
- output: [N,1,K*D,K*W,K*H] deformed feature map
- """
- def _bilinear_interpolate_3D(self, input_feature, y, x):
- device = input_feature.device
- y = y.reshape([-1]).float()
- x = x.reshape([-1]).float()
- zero = torch.zeros([]).int()
- max_y = self.width - 1
- max_x = self.height - 1
- # find 8 grid locations
- y0 = torch.floor(y).int()
- y1 = y0 + 1
- x0 = torch.floor(x).int()
- x1 = x0 + 1
- # clip out coordinates exceeding feature map volume
- y0 = torch.clamp(y0, zero, max_y)
- y1 = torch.clamp(y1, zero, max_y)
- x0 = torch.clamp(x0, zero, max_x)
- x1 = torch.clamp(x1, zero, max_x)
- input_feature_flat = input_feature.flatten()
- input_feature_flat = input_feature_flat.reshape(
- self.num_batch, self.num_channels, self.width, self.height)
- input_feature_flat = input_feature_flat.permute(0, 2, 3, 1)
- input_feature_flat = input_feature_flat.reshape(-1, self.num_channels)
- dimension = self.height * self.width
- base = torch.arange(self.num_batch) * dimension
- base = base.reshape([-1, 1]).float()
- repeat = torch.ones([self.num_points * self.width * self.height
- ]).unsqueeze(0)
- repeat = repeat.float()
- base = torch.matmul(base, repeat)
- base = base.reshape([-1])
- base = base.to(device)
- base_y0 = base + y0 * self.height
- base_y1 = base + y1 * self.height
- # top rectangle of the neighbourhood volume
- index_a0 = base_y0 - base + x0
- index_c0 = base_y0 - base + x1
- # bottom rectangle of the neighbourhood volume
- index_a1 = base_y1 - base + x0
- index_c1 = base_y1 - base + x1
- # get 8 grid values
- value_a0 = input_feature_flat[index_a0.type(torch.int64)].to(device)
- value_c0 = input_feature_flat[index_c0.type(torch.int64)].to(device)
- value_a1 = input_feature_flat[index_a1.type(torch.int64)].to(device)
- value_c1 = input_feature_flat[index_c1.type(torch.int64)].to(device)
- # find 8 grid locations
- y0 = torch.floor(y).int()
- y1 = y0 + 1
- x0 = torch.floor(x).int()
- x1 = x0 + 1
- # clip out coordinates exceeding feature map volume
- y0 = torch.clamp(y0, zero, max_y + 1)
- y1 = torch.clamp(y1, zero, max_y + 1)
- x0 = torch.clamp(x0, zero, max_x + 1)
- x1 = torch.clamp(x1, zero, max_x + 1)
- x0_float = x0.float()
- x1_float = x1.float()
- y0_float = y0.float()
- y1_float = y1.float()
- vol_a0 = ((y1_float - y) * (x1_float - x)).unsqueeze(-1).to(device)
- vol_c0 = ((y1_float - y) * (x - x0_float)).unsqueeze(-1).to(device)
- vol_a1 = ((y - y0_float) * (x1_float - x)).unsqueeze(-1).to(device)
- vol_c1 = ((y - y0_float) * (x - x0_float)).unsqueeze(-1).to(device)
- outputs = (value_a0 * vol_a0 + value_c0 * vol_c0 + value_a1 * vol_a1 +
- value_c1 * vol_c1)
- if self.morph == 0:
- outputs = outputs.reshape([
- self.num_batch,
- self.num_points * self.width,
- 1 * self.height,
- self.num_channels,
- ])
- outputs = outputs.permute(0, 3, 1, 2)
- else:
- outputs = outputs.reshape([
- self.num_batch,
- 1 * self.width,
- self.num_points * self.height,
- self.num_channels,
- ])
- outputs = outputs.permute(0, 3, 1, 2)
- return outputs
- def deform_conv(self, input, offset, if_offset):
- y, x = self._coordinate_map_3D(offset, if_offset)
- deformed_feature = self._bilinear_interpolate_3D(input, y, x)
- return deformed_feature
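Before wiring the module into YOLOv8, it can be worth sanity-checking it in isolation. Below is a minimal sketch, assuming the code above has been saved as ultralytics/nn/other_modules/dynamic_snake_conv.py (the path used later in this tutorial) and that other_modules is a proper package with an __init__.py; the tensor sizes and channel counts are arbitrary example values, not from the paper.

import torch
from ultralytics.nn.other_modules.dynamic_snake_conv import DySnakeConv

x = torch.randn(2, 32, 64, 64)   # [B, C, H, W], arbitrary example values
m = DySnakeConv(32, 16, k=3)     # standard conv + x-morph DSConv + y-morph DSConv
y = m(x)
print(y.shape)                   # torch.Size([2, 48, 64, 64]): 3 * 16 channels, spatial size preserved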
In the file ultralytics\ultralytics\nn\other_modules\block.py, declare the dynamic snake convolution at the top of the file:
from ultralytics.nn.modules import Conv
from .dynamic_snake_conv import DySnakeConv
and add the new block names to __all__:
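The original post does not show the exact entry; the idea is simply to export the new block names from other_modules/block.py. A hypothetical sketch (if the file already defines __all__, just append these names to it):

# hypothetical: export the new blocks so "from ultralytics.nn.other_modules import *" picks them up
__all__ = ['Bottleneck_DySnakeConv', 'C2f_DySnakeConv']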
Then, still in the same block.py file, add the C2f_DySnakeConv code:
####### add C2f_DySnakeConv ##########
class Bottleneck_DySnakeConv(Bottleneck):
    """Standard bottleneck with DySnakeConv."""

    def __init__(self, c1, c2, shortcut=True, g=1, k=(3, 3), e=0.5):  # ch_in, ch_out, shortcut, groups, kernels, expand
        super().__init__(c1, c2, shortcut, g, k, e)
        c_ = int(c2 * e)  # hidden channels
        self.cv2 = DySnakeConv(c_, c2, k[1])
        self.cv3 = Conv(c2 * 3, c2, k=1)  # fuse the 3 * c2 channels produced by DySnakeConv back to c2

    def forward(self, x):
        """Apply cv1 -> DySnakeConv -> 1x1 fuse conv, with an optional residual connection."""
        return x + self.cv3(self.cv2(self.cv1(x))) if self.add else self.cv3(self.cv2(self.cv1(x)))


class C2f_DySnakeConv(C2f):
    def __init__(self, c1, c2, n=1, shortcut=False, g=1, e=0.5):
        super().__init__(c1, c2, n, shortcut, g, e)
        self.m = nn.ModuleList(Bottleneck_DySnakeConv(self.c, self.c, shortcut, g, k=(3, 3), e=1.0) for _ in range(n))
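As a quick check that the new block wires up correctly, a minimal sketch like the following can be run once block.py imports cleanly (the channel numbers and feature-map size are hypothetical example values):

import torch
from ultralytics.nn.other_modules.block import C2f_DySnakeConv

m = C2f_DySnakeConv(64, 128, n=2, shortcut=True)
y = m(torch.randn(1, 64, 40, 40))
print(y.shape)  # expected: torch.Size([1, 128, 40, 40])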
Next, at the top of ultralytics/nn/tasks.py (the file that defines parse_model), import the other_modules package created in the previous steps:
from ultralytics.nn.other_modules.block import C2f_DySnakeConv
from ultralytics.nn.other_modules import *
Then, in the def parse_model function further down in that file, add a warehouse_manager parameter inside the parentheses (it defaults to None and is not used by DySnakeConv itself):
def parse_model(d, ch, verbose=True, warehouse_manager=None):
Then, further down inside parse_model, add the handling for C2f_DySnakeConv: replace the contents of lines 685-694 of the original file with the following version, which includes C2f_DySnakeConv:
        if m in (Classify, Conv, ConvTranspose, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, Focus,
                 BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, nn.ConvTranspose2d, DWConvTranspose2d, C3x, RepC3,
                 C2f_DBB, C2f_DySnakeConv):
            if args[0] == 'head_channel':
                args[0] = d[args[0]]
            c1, c2 = ch[f], args[0]
            if c2 != nc:  # if c2 not equal to number of classes (i.e. for Classify() output)
                c2 = make_divisible(min(c2, max_channels) * width, 8)

            args = [c1, c2, *args[1:]]

            if m in (BottleneckCSP, C1, C2, C2f, C3, C3TR, C3Ghost, C3x, RepC3, C2f_DBB, C2f_DySnakeConv):
                args.insert(2, n)  # number of repeats
                n = 1
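For reference, here is an illustration (not part of the patch) of what the branch above does to args for a hypothetical model-YAML row such as [-1, 3, C2f_DySnakeConv, [256]] with width=0.5, nc=80, and max_channels=1024; make_divisible is replaced by a simplified stand-in so the snippet is self-contained.

import math

def make_divisible(x, divisor):
    # simplified stand-in for Ultralytics' make_divisible helper
    return math.ceil(x / divisor) * divisor

ch = [128]                    # output channels of the previous layer
f, n, args = -1, 3, [256]     # from the hypothetical YAML row
nc, max_channels, width = 80, 1024, 0.5

c1, c2 = ch[f], args[0]
if c2 != nc:
    c2 = make_divisible(min(c2, max_channels) * width, 8)  # 256 * 0.5 -> 128
args = [c1, c2, *args[1:]]
args.insert(2, n)             # insert the repeat count -> [128, 128, 3]
n = 1
print(args)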