import torch
import torch.nn as nn
import torch.nn.functional as F


class BasicConv2d(nn.Module):
    """
    Basic convolution block (Conv2d + BatchNorm) with configurable
    stride, padding, and dilation, so it also supports dilated convolution.
    """
    def __init__(self, in_planes, out_planes, kernel_size, stride=1, padding=0, dilation=1):
        super(BasicConv2d, self).__init__()
        self.conv = nn.Conv2d(in_planes, out_planes,
                              kernel_size=kernel_size, stride=stride,
                              padding=padding, dilation=dilation, bias=False)
        self.bn = nn.BatchNorm2d(out_planes)
        self.relu = nn.ReLU(inplace=True)

    def forward(self, x):
        # Note: self.relu is defined but intentionally not applied here;
        # the block returns the batch-normalized convolution output.
        x = self.conv(x)
        x = self.bn(x)
        return x


class RFB_modified(nn.Module):
    def __init__(self, in_channel, out_channel):
        super(RFB_modified, self).__init__()
        self.relu = nn.ReLU(True)
        # Branch 0: 1x1 convolution for channel transformation
        self.branch0 = nn.Sequential(
            BasicConv2d(in_channel, out_channel, 1),
        )
        # Branch 1: 1x3 / 3x1 convolutions followed by a 3x3 dilated
        # convolution (dilation=3); spatial size is preserved
        self.branch1 = nn.Sequential(
            BasicConv2d(in_channel, out_channel, 1),
            BasicConv2d(out_channel, out_channel, kernel_size=(1, 3), padding=(0, 1)),
            BasicConv2d(out_channel, out_channel, kernel_size=(3, 1), padding=(1, 0)),
            BasicConv2d(out_channel, out_channel, 3, padding=3, dilation=3)
        )
        # Branch 2: 1x5 / 5x1 convolutions followed by a 3x3 dilated
        # convolution (dilation=5); spatial size is preserved
        self.branch2 = nn.Sequential(
            BasicConv2d(in_channel, out_channel, 1),
            BasicConv2d(out_channel, out_channel, kernel_size=(1, 5), padding=(0, 2)),
            BasicConv2d(out_channel, out_channel, kernel_size=(5, 1), padding=(2, 0)),
            BasicConv2d(out_channel, out_channel, 3, padding=5, dilation=5)
        )
        # Branch 3: 1x7 / 7x1 convolutions followed by a 3x3 dilated
        # convolution (dilation=7); spatial size is preserved
        self.branch3 = nn.Sequential(
            BasicConv2d(in_channel, out_channel, 1),
            BasicConv2d(out_channel, out_channel, kernel_size=(1, 7), padding=(0, 3)),
            BasicConv2d(out_channel, out_channel, kernel_size=(7, 1), padding=(3, 0)),
            BasicConv2d(out_channel, out_channel, 3, padding=7, dilation=7)
        )
        # Fuse the concatenated multi-scale features and reduce channels
        self.conv_cat = BasicConv2d(4 * out_channel, out_channel, 3, padding=1)
        # 1x1 convolution on the residual path for channel reduction
        self.conv_res = BasicConv2d(in_channel, out_channel, 1)

    def forward(self, x):
        x0 = self.branch0(x)
        x1 = self.branch1(x)
        print(x1.shape)
        x2 = self.branch2(x)
        print(x2.shape)
        x3 = self.branch3(x)
        print(x3.shape)
        # Concatenate the multi-scale branches along the channel dimension
        x_cat = self.conv_cat(torch.cat((x0, x1, x2, x3), 1))
        print(x_cat.shape)
        # Residual connection followed by ReLU
        x = self.relu(x_cat + self.conv_res(x))
        return x


if __name__ == '__main__':
    ras = RFB_modified(1, 1).cuda()
    input_tensor = torch.randn(1, 1, 352, 352).cuda()
    out = ras(input_tensor)
    print(out)
    print(out.shape)
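
For a quick sanity check without a GPU, the sketch below runs the module on the CPU with a more typical channel configuration (the 64-in / 32-out channel widths and the 44x44 feature map are assumptions for illustration, not part of the original script) and confirms that each branch preserves the spatial resolution while the block reduces the channel count.

    # Minimal CPU-only sketch with assumed channel widths (hypothetical example):
    # maps a 64-channel feature map to 32 channels while keeping H x W unchanged.
    rfb = RFB_modified(64, 32)             # assumed in/out channel counts
    feat = torch.randn(2, 64, 44, 44)      # e.g. a backbone feature map
    out = rfb(feat)
    assert out.shape == (2, 32, 44, 44)    # spatial size preserved, channels reduced
    print(out.shape)                       # torch.Size([2, 32, 44, 44])

Because every branch pads exactly enough to offset its kernel size and dilation, the four outputs can be concatenated along the channel dimension without any resizing, which is what allows the 1x1 residual path and the fused features to be summed directly.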
