EfficientNetV2: Smaller Models and Faster Training
霹雳吧啦Wz: 10.1 EfficientNetV2 network explained in detail
太阳花的小绿豆: EfficientNetV2 network explained in detail
Compared with EfficientNetV1, EfficientNetV2 puts more emphasis on the model's training speed.
The main contributions of EfficientNetV2 are as follows:
Using depthwise convolutions in the shallow layers of the network is very slow, so the shallow stages use Fused-MBConv instead.
Compared with MBConv, Fused-MBConv simply replaces the 1 x 1 expand conv and the depthwise conv with a single regular 3 x 3 conv; a minimal comparison is sketched below.
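A rough structural comparison of the two blocks, ignoring BN, activation, SE and the shortcut for brevity (the channel numbers 24/96 are purely illustrative, not taken from the post):

import torch.nn as nn

# MBConv: 1x1 expand conv -> 3x3 depthwise conv -> 1x1 project conv
mbconv_ops = nn.Sequential(
    nn.Conv2d(24, 96, kernel_size=1),                         # 1x1 expand conv
    nn.Conv2d(96, 96, kernel_size=3, padding=1, groups=96),   # 3x3 depthwise conv
    nn.Conv2d(96, 24, kernel_size=1),                          # 1x1 project conv
)

# Fused-MBConv: one regular 3x3 conv replaces the expand conv and the depthwise conv
fused_mbconv_ops = nn.Sequential(
    nn.Conv2d(24, 96, kernel_size=3, padding=1),   # regular 3x3 conv (expansion + spatial mixing)
    nn.Conv2d(96, 24, kernel_size=1),              # 1x1 project conv
)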
Adjustment of depth and width:
In EfficientNetV1, the depth and width of every stage are scaled up by the same factor, but scaling every stage equally is sub-optimal: each stage contributes differently to the training speed and to the number of parameters, so simply applying uniform scaling is unreasonable. EfficientNetV2 therefore uses a non-uniform scaling strategy to scale the model.
The structure of EfficientNetV2-S is shown in the table below.
In the table, stride refers to the stride of the first operator in each stage, and SE0.25 means the number of nodes in the first fully connected layer of the SE module is 1/4 of the number of channels input to the MBConv module, which is the same as in EfficientNetV1.
Differences from EfficientNetV1:
Fused-MBConv module
When the expansion ratio equals 1 there is no expand conv, and the SE module is not used here (the figure in the original paper does show SE). The figure below shows the two cases where the expansion ratio equals 1 and where it does not.
The condition for the shortcut connection is the same as in V1, and the Dropout layer only exists when there is a shortcut connection.
Dropout: stochastic depth is used. With a certain probability, the output of the main branch is dropped (as at f3 in the figure below) and the output of the previous layer is passed directly to the input of the next layer, which is equivalent to removing this layer.
The network then has a stochastic depth, which speeds up training and slightly improves accuracy.
Note: the dropout layer discussed above refers only to the dropout inside MBConv and Fused-MBConv, not the dropout layer in front of the final fully connected layer.
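The DropPath layer used in the block code below is not listed in this post; here is a minimal sketch of a common stochastic-depth implementation (timm-style drop_path; treat it as an assumption about what the author's DropPath does):

import torch
import torch.nn as nn
from torch import Tensor

def drop_path(x: Tensor, drop_prob: float = 0., training: bool = False) -> Tensor:
    # Randomly zero the whole residual branch per sample, rescaling the kept samples
    if drop_prob == 0. or not training:
        return x
    keep_prob = 1 - drop_prob
    shape = (x.shape[0],) + (1,) * (x.ndim - 1)   # broadcast over all dims except batch
    random_tensor = keep_prob + torch.rand(shape, dtype=x.dtype, device=x.device)
    random_tensor.floor_()                         # binarize: 0 = drop this sample's branch, 1 = keep it
    return x.div(keep_prob) * random_tensor

class DropPath(nn.Module):
    def __init__(self, drop_prob: float = 0.):
        super(DropPath, self).__init__()
        self.drop_prob = drop_prob

    def forward(self, x: Tensor) -> Tensor:
        return drop_path(x, self.drop_prob, self.training)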
MBConv module
Basically the same as in EfficientNetV1.
EfficientNetV2-M and EfficientNetV2-L have one more stage than EfficientNetV2-S.
Progressive learning strategy:
Different regularization should be used with different input image sizes: in the early stage of training, using a smaller training image size and weaker regularization lets the network quickly learn simple representations; the image size is then gradually increased while the regularization is strengthened at the same time.
Applying this progressive learning strategy to ResNet and EfficientNet, the table below shows that the training time is reduced.
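A minimal sketch of what such a schedule could look like (the number of stages and the size/regularization ranges are illustrative assumptions, not the paper's exact values):

def progressive_schedule(stage: int, num_stages: int,
                         size_min: int = 128, size_max: int = 300,
                         reg_min: float = 0.1, reg_max: float = 0.3):
    # Linearly interpolate image size and regularization strength (e.g. dropout rate)
    # from small/weak in the first stage to large/strong in the last stage
    t = stage / max(num_stages - 1, 1)
    img_size = int(size_min + t * (size_max - size_min))
    reg = reg_min + t * (reg_max - reg_min)
    return img_size, reg

for stage in range(4):
    print(progressive_schedule(stage, num_stages=4))
# image size grows from 128 to 300 while the regularization strength grows from 0.1 to 0.3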
Combines convolution, BN and activation into one module (ConvBNAct), basically the same as in V1.
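ConvBNAct itself is not listed in this post; here is a minimal sketch that is consistent with how it is called in the code below (the default activation and the padding rule are assumptions):

from typing import Callable, Optional
import torch.nn as nn
from torch import Tensor

class ConvBNAct(nn.Module):
    def __init__(self,
                 in_planes: int,
                 out_planes: int,
                 kernel_size: int = 3,
                 stride: int = 1,
                 groups: int = 1,
                 norm_layer: Optional[Callable[..., nn.Module]] = None,
                 activation_layer: Optional[Callable[..., nn.Module]] = None):
        super(ConvBNAct, self).__init__()
        padding = (kernel_size - 1) // 2           # "same" padding for odd kernel sizes
        if norm_layer is None:
            norm_layer = nn.BatchNorm2d
        if activation_layer is None:
            activation_layer = nn.SiLU             # default activation is SiLU (Swish)
        self.conv = nn.Conv2d(in_planes, out_planes, kernel_size=kernel_size,
                              stride=stride, padding=padding, groups=groups, bias=False)
        self.bn = norm_layer(out_planes)
        self.act = activation_layer()

    def forward(self, x: Tensor) -> Tensor:
        return self.act(self.bn(self.conv(x)))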
SE module: same as in V1.
In the forward pass, the feature map is averaged over dimensions 2 and 3 (the height and width dimensions); calling adaptive_avg_pool2d(x, output_size=(1, 1)) would give the same result.
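A minimal sketch of such an SE module, assuming it follows the usual squeeze-and-excitation pattern (the 1x1 convs play the role of the two fully connected layers):

import torch.nn as nn
from torch import Tensor

class SqueezeExcite(nn.Module):
    def __init__(self, input_c: int, expand_c: int, se_ratio: float = 0.25):
        super(SqueezeExcite, self).__init__()
        squeeze_c = int(input_c * se_ratio)        # SE0.25: 1/4 of the block's input channels
        self.conv_reduce = nn.Conv2d(expand_c, squeeze_c, kernel_size=1)
        self.act1 = nn.SiLU()
        self.conv_expand = nn.Conv2d(squeeze_c, expand_c, kernel_size=1)
        self.act2 = nn.Sigmoid()

    def forward(self, x: Tensor) -> Tensor:
        scale = x.mean((2, 3), keepdim=True)       # average over the H and W dimensions
        scale = self.act1(self.conv_reduce(scale))
        scale = self.act2(self.conv_expand(scale))
        return scale * x                           # channel-wise re-weighting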
MBConv module: basically the same as in V1.
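The MBConv class itself is not shown in this post; the following is a sketch consistent with how it is called in the EfficientNetV2 class below, reusing the ConvBNAct, SqueezeExcite and DropPath sketches above (treat the internal details as assumptions):

class MBConv(nn.Module):
    def __init__(self,
                 kernel_size: int,
                 input_c: int,
                 out_c: int,
                 expand_ratio: int,
                 stride: int,
                 se_ratio: float,
                 drop_rate: float,
                 norm_layer: Callable[..., nn.Module]):
        super(MBConv, self).__init__()
        assert stride in [1, 2]
        self.has_shortcut = (stride == 1 and input_c == out_c)
        activation_layer = nn.SiLU
        expanded_c = input_c * expand_ratio

        # 1x1 expand conv
        self.expand_conv = ConvBNAct(input_c, expanded_c, kernel_size=1,
                                     norm_layer=norm_layer, activation_layer=activation_layer)
        # kxk depthwise conv (groups == channels)
        self.dwconv = ConvBNAct(expanded_c, expanded_c, kernel_size=kernel_size, stride=stride,
                                groups=expanded_c, norm_layer=norm_layer,
                                activation_layer=activation_layer)
        self.se = SqueezeExcite(input_c, expanded_c, se_ratio) if se_ratio > 0 else nn.Identity()
        # 1x1 project conv, no activation
        self.project_conv = ConvBNAct(expanded_c, out_c, kernel_size=1,
                                      norm_layer=norm_layer, activation_layer=nn.Identity)

        self.out_channels = out_c
        self.drop_rate = drop_rate
        if self.has_shortcut and drop_rate > 0:
            self.dropout = DropPath(drop_rate)

    def forward(self, x: Tensor) -> Tensor:
        result = self.expand_conv(x)
        result = self.dwconv(result)
        result = self.se(result)
        result = self.project_conv(result)
        if self.has_shortcut:
            if self.drop_rate > 0:
                result = self.dropout(result)
            result += x
        return result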
class FusedMBConv(nn.Module):
    def __init__(self,
                 kernel_size: int,
                 input_c: int,
                 out_c: int,
                 expand_ratio: int,
                 stride: int,
                 se_ratio: float,
                 drop_rate: float,
                 norm_layer: Callable[..., nn.Module]):
        super(FusedMBConv, self).__init__()
        assert stride in [1, 2]
        assert se_ratio == 0
        self.has_shortcut = stride == 1 and input_c == out_c
        self.drop_rate = drop_rate
        self.has_expansion = expand_ratio != 1
        activation_layer = nn.SiLU  # alias Swish
        expanded_c = input_c * expand_ratio

        # there is an expand conv only when expand ratio != 1
        if self.has_expansion:
            # Expansion convolution
            self.expand_conv = ConvBNAct(input_c, expanded_c,
                                         kernel_size=kernel_size,
                                         stride=stride,
                                         norm_layer=norm_layer,
                                         activation_layer=activation_layer)
            self.project_conv = ConvBNAct(expanded_c, out_c,
                                          kernel_size=1,
                                          norm_layer=norm_layer,
                                          activation_layer=nn.Identity)  # note: no activation function
        else:
            # the case where there is only a project_conv
            self.project_conv = ConvBNAct(input_c, out_c,
                                          kernel_size=kernel_size,
                                          stride=stride,
                                          norm_layer=norm_layer,
                                          activation_layer=activation_layer)  # note: with activation function

        self.out_channels = out_c

        # the dropout layer is only used when there is a shortcut connection
        self.drop_rate = drop_rate
        if self.has_shortcut and drop_rate > 0:
            self.dropout = DropPath(drop_rate)

    def forward(self, x: Tensor) -> Tensor:
        if self.has_expansion:
            result = self.expand_conv(x)
            result = self.project_conv(result)
        else:
            result = self.project_conv(x)

        if self.has_shortcut:
            if self.drop_rate > 0:
                result = self.dropout(result)
            result += x

        return result
There are two cases, expand ratio equal to 1 and not equal to 1; only when it is not equal to 1 is there an expand conv.
The rest of the logic is similar to MBConv.
The network can be divided into 3 parts: the stem, the stacked blocks, and the head.
class EfficientNetV2(nn.Module):
    def __init__(self,
                 model_cnf: list,
                 num_classes: int = 1000,
                 num_features: int = 1280,
                 dropout_rate: float = 0.2,
                 drop_connect_rate: float = 0.2):
        super(EfficientNetV2, self).__init__()

        for cnf in model_cnf:
            assert len(cnf) == 8

        norm_layer = partial(nn.BatchNorm2d, eps=1e-3, momentum=0.1)

        stem_filter_num = model_cnf[0][4]
        self.stem = ConvBNAct(3, stem_filter_num,
                              kernel_size=3,
                              stride=2,
                              norm_layer=norm_layer)  # the default activation is SiLU

        total_blocks = sum([i[0] for i in model_cnf])
        block_id = 0
        blocks = []
        for cnf in model_cnf:
            repeats = cnf[0]
            op = FusedMBConv if cnf[-2] == 0 else MBConv
            for i in range(repeats):
                blocks.append(op(kernel_size=cnf[1],
                                 input_c=cnf[4] if i == 0 else cnf[5],
                                 out_c=cnf[5],
                                 expand_ratio=cnf[3],
                                 stride=cnf[2] if i == 0 else 1,
                                 se_ratio=cnf[-1],
                                 drop_rate=drop_connect_rate * block_id / total_blocks,
                                 norm_layer=norm_layer))
                block_id += 1
        self.blocks = nn.Sequential(*blocks)

        head_input_c = model_cnf[-1][-3]
        head = OrderedDict()
        head.update({"project_conv": ConvBNAct(head_input_c,
                                               num_features,
                                               kernel_size=1,
                                               norm_layer=norm_layer)})  # the default activation is SiLU
        head.update({"avgpool": nn.AdaptiveAvgPool2d(1)})
        head.update({"flatten": nn.Flatten()})
        if dropout_rate > 0:
            head.update({"dropout": nn.Dropout(p=dropout_rate, inplace=True)})
        head.update({"classifier": nn.Linear(num_features, num_classes)})
        self.head = nn.Sequential(head)

        # initial weights
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode="fan_out")
                if m.bias is not None:
                    nn.init.zeros_(m.bias)
            elif isinstance(m, nn.BatchNorm2d):
                nn.init.ones_(m.weight)
                nn.init.zeros_(m.bias)
            elif isinstance(m, nn.Linear):
                nn.init.normal_(m.weight, 0, 0.01)
                nn.init.zeros_(m.bias)

    def forward(self, x: Tensor) -> Tensor:
        x = self.stem(x)
        x = self.blocks(x)
        x = self.head(x)
        return x
for cnf in model_cnf:
assert len(cnf) == 8
Each row of model_config has 8 elements; this assertion guards against mistakes when the configuration is modified.
The 8 elements are, in order: number of layers (repeats), kernel_size, stride, expand ratio, in_channel, out_channel, which module to use (0 for FusedMBConv, 1 for MBConv), and the SE ratio (FusedMBConv does not use SE, so it is always 0).
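For example, this is roughly how one config row gets expanded into repeated blocks, mirroring the loop in the EfficientNetV2 class above (only the first repeat uses the row's stride and in_channel; the rest use stride 1 and take out_channel as input):

row = [4, 3, 2, 4, 24, 48, 0, 0]  # repeat, kernel, stride, expansion, in_c, out_c, operator, se_ratio
for i in range(row[0]):
    print(dict(kernel_size=row[1],
               input_c=row[4] if i == 0 else row[5],
               out_c=row[5],
               expand_ratio=row[3],
               stride=row[2] if i == 0 else 1,
               se_ratio=row[-1]))
# -> the first block uses stride 2 with 24 input channels, the remaining three use stride 1 with 48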
def efficientnetv2_s(num_classes: int = 1000):
    # train_size: 300, eval_size: 384

    # repeat, kernel, stride, expansion, in_c, out_c, operator, se_ratio
    model_config = [[2, 3, 1, 1, 24, 24, 0, 0],
                    [4, 3, 2, 4, 24, 48, 0, 0],
                    [4, 3, 2, 4, 48, 64, 0, 0],
                    [6, 3, 2, 4, 64, 128, 1, 0.25],
                    [9, 3, 1, 6, 128, 160, 1, 0.25],
                    [15, 3, 2, 6, 160, 256, 1, 0.25]]

    model = EfficientNetV2(model_cnf=model_config,
                           num_classes=num_classes,
                           dropout_rate=0.2)
    return model
def efficientnetv2_m(num_classes: int = 1000):
    # train_size: 384, eval_size: 480

    # repeat, kernel, stride, expansion, in_c, out_c, operator, se_ratio
    model_config = [[3, 3, 1, 1, 24, 24, 0, 0],
                    [5, 3, 2, 4, 24, 48, 0, 0],
                    [5, 3, 2, 4, 48, 80, 0, 0],
                    [7, 3, 2, 4, 80, 160, 1, 0.25],
                    [14, 3, 1, 6, 160, 176, 1, 0.25],
                    [18, 3, 2, 6, 176, 304, 1, 0.25],
                    [5, 3, 1, 6, 304, 512, 1, 0.25]]

    model = EfficientNetV2(model_cnf=model_config,
                           num_classes=num_classes,
                           dropout_rate=0.3)
    return model
def efficientnetv2_l(num_classes: int = 1000):
    # train_size: 384, eval_size: 480

    # repeat, kernel, stride, expansion, in_c, out_c, operator, se_ratio
    model_config = [[4, 3, 1, 1, 32, 32, 0, 0],
                    [7, 3, 2, 4, 32, 64, 0, 0],
                    [7, 3, 2, 4, 64, 96, 0, 0],
                    [10, 3, 2, 4, 96, 192, 1, 0.25],
                    [19, 3, 1, 6, 192, 224, 1, 0.25],
                    [25, 3, 2, 6, 224, 384, 1, 0.25],
                    [7, 3, 1, 6, 384, 640, 1, 0.25]]

    model = EfficientNetV2(model_cnf=model_config,
                           num_classes=num_classes,
                           dropout_rate=0.4)
    return model
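A quick sanity check of the model built above (the 300 x 300 input matches the S variant's training size; the shape in the comment is what would be expected):

import torch

model = efficientnetv2_s(num_classes=5)
x = torch.randn(1, 3, 300, 300)
model.eval()
with torch.no_grad():
    out = model(x)
print(out.shape)  # expected: torch.Size([1, 5])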
img_size = {"s": [300, 384], # train_size, val_size
"m": [384, 480],
"l": [384, 480]}
data_transform = {
"train": transforms.Compose([transforms.RandomResizedCrop(img_size[num_model][0]),
transforms.RandomHorizontalFlip(),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),
"val": transforms.Compose([transforms.Resize(img_size[num_model][1]),
transforms.CenterCrop(img_size[num_model][1]),
transforms.ToTensor(),
transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])}