This article records how YOLOv5 builds its model from a yaml model file: from parsing the yaml parameters and their purpose, to constructing the network in parse_model, and finally to how YOLOv5 uses the assembled model during training.
model/yolo.py: the model-building file. It mainly contains the model wrapper class Model(nn.Module) and parse_model(d, ch), which builds the network from the model yaml parameters (e.g. yolov5s.yaml).
model/common.py: the model building blocks (the network modules the model is assembled from).
At around line 113 of train.py, the following code appears:
if pretrained:
    with torch_distributed_zero_first(LOCAL_RANK):
        weights = attempt_download(weights)  # download if not found locally
    ckpt = torch.load(weights, map_location=device)  # load checkpoint
    model = Model(cfg or ckpt['model'].yaml, ch=3, nc=nc, anchors=hyp.get('anchors')).to(device)  # create
    exclude = ['anchor'] if (cfg or hyp.get('anchors')) and not resume else []  # exclude keys
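The exclude list controls which checkpoint entries are skipped when the pretrained weights are copied into the freshly built model. A rough sketch of what happens next inside the if pretrained: branch (not the verbatim train.py source; the filtering helper is simplified here):

    csd = ckpt['model'].float().state_dict()  # checkpoint state_dict as FP32
    # keep only entries that exist in the new model, match in shape, and are not excluded
    csd = {k: v for k, v in csd.items()
           if k in model.state_dict()
           and v.shape == model.state_dict()[k].shape
           and not any(x in k for x in exclude)}
    model.load_state_dict(csd, strict=False)  # load the matching weights only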
The walkthrough below uses yolov5s.yaml as the reference file.
backbone:
# [from, number, module, args]
[[-1, 1, Conv, [64, 6, 2, 2]], # 0-P1/2
[-1, 1, Conv, [128, 3, 2]], # 1-P2/4
[-1, 3, C3, [128]],
[-1, 1, Conv, [256, 3, 2]], # 3-P3/8
[-1, 6, C3, [256]],
[-1, 1, Conv, [512, 3, 2]], # 5-P4/16
[-1, 9, C3, [512]],
[-1, 1, Conv, [1024, 3, 2]], # 7-P5/32
[-1, 3, C3, [1024]],
[-1, 1, SPPF, [1024, 5]], # 9
]
Take the backbone row [-1, 1, Conv, [128, 3, 2]] as the reference. In parse_model(d, ch) it is unpacked as f, n, m, args = [-1, 1, Conv, [128, 3, 2]].
f selects the input channels ch[f] (ch records the output channels of every layer; -1 takes the previous layer's channel count);
m is the module class to instantiate, usually defined in common.py;
n is the network depth, assigned with max(round(n * depth_multiple), 1), i.e. how many times the m block is repeated;
args corresponds to [128, 3, 2]: args[0] is the output channel count, adjusted with make_divisible(args[0] * width_multiple, 8), i.e. math.ceil(args[0] * width_multiple / 8) * 8; args[1] is the kernel size and args[2] the stride;
the trailing entries (kernel and stride here) are passed through unchanged, so Conv finally receives [c1, c2, k, s]. A worked example follows.
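As a concrete check, here is a small hand-computation of this row for yolov5s (width_multiple = 0.5; the previous layer is assumed to output 32 channels, i.e. 64 * 0.5 from row 0):

import math

def make_divisible(x, divisor):       # same formula parse_model relies on
    return math.ceil(x / divisor) * divisor

gw = 0.5                              # yolov5s width_multiple
c1 = 32                               # ch[-1], output channels of row 0 (64 * 0.5)
c2 = make_divisible(128 * gw, 8)      # 64
args = [c1, c2, 3, 2]                 # what Conv finally receives: Conv(32, 64, k=3, s=2)
print(args)                           # [32, 64, 3, 2]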
Take the backbone row [-1, 3, C3, [128]] as the reference. In parse_model(d, ch) it is unpacked as f, n, m, args = [-1, 3, C3, [128]].
f selects the input channels ch[f] (ch records the output channels of every layer; -1 takes the previous layer's channel count);
m is the module class to instantiate, usually defined in common.py;
n is the network depth, assigned with max(round(n * depth_multiple), 1), i.e. how many times the m block is repeated;
args corresponds to [128]: args[0] is c2, adjusted with make_divisible(args[0] * width_multiple, 8), and it determines the output channel count of this layer. For C3-like modules, the code below then inserts n into args (args.insert(2, n)) and resets n to 1, so inside C3 that value becomes the number of internal Bottleneck repeats rather than a repetition of the whole module.
As an aside, when args is [128, False], the False passed to C3 indicates whether a shortcut connection is used.
c1, c2 = ch[f], args[0]
if c2 != no:  # if not output
    c2 = make_divisible(c2 * gw, 8)
args = [c1, c2, *args[1:]]
# for these modules, move the repeat count n into args
if m in [BottleneckCSP, C3, C3TR, C3Ghost]:
    args.insert(2, n)  # number of repeats
    n = 1
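Applying this to the row [-1, 3, C3, [128]] with the yolov5s multipliers (depth_multiple = 0.33, width_multiple = 0.5; the previous row is assumed to output 64 channels) gives:

import math

gd, gw = 0.33, 0.5
n = max(round(3 * gd), 1)             # depth gain: round(0.99) -> 1
c1 = 64                               # ch[-1], output of the previous Conv row
c2 = math.ceil(128 * gw / 8) * 8      # make_divisible(128 * 0.5, 8) -> 64
args = [c1, c2]                       # [128] became [c1, c2]
args.insert(2, n)                     # -> [64, 64, 1]; n itself is then reset to 1
print(args)                           # C3(64, 64, 1): one internal Bottleneck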
The C3 module code is as follows:
class C3(nn.Module):
    # CSP Bottleneck with 3 convolutions
    def __init__(self, c1, c2, n=1, shortcut=True, g=1, e=0.5):  # ch_in, ch_out, number, shortcut, groups, expansion
        super().__init__()
        c_ = int(c2 * e)  # hidden channels
        self.cv1 = Conv(c1, c_, 1, 1)
        self.cv2 = Conv(c1, c_, 1, 1)
        self.cv3 = Conv(2 * c_, c2, 1)  # act=FReLU(c2)
        self.m = nn.Sequential(*[Bottleneck(c_, c_, shortcut, g, e=1.0) for _ in range(n)])
        # self.m = nn.Sequential(*[CrossConv(c_, c_, 3, 1, g, 1.0, shortcut) for _ in range(n)])

    def forward(self, x):
        return self.cv3(torch.cat((self.m(self.cv1(x)), self.cv2(x)), dim=1))
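A quick shape check of C3, assuming the YOLOv5 repository is on the import path (channel values chosen for illustration):

import torch
from models.common import C3

m = C3(64, 64, n=1, shortcut=False)   # shortcut=False, as in the head rows ending with False
x = torch.randn(1, 64, 80, 80)
print(m(x).shape)                     # torch.Size([1, 64, 80, 80]); spatial size is unchanged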
Head parameters
head:
[[-1, 1, Conv, [512, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 6], 1, Concat, [1]], # cat backbone P4
[-1, 3, C3, [512, False]], # 13
[-1, 1, Conv, [256, 1, 1]],
[-1, 1, nn.Upsample, [None, 2, 'nearest']],
[[-1, 4], 1, Concat, [1]], # cat backbone P3
[-1, 3, C3, [256, False]], # 17 (P3/8-small)
[-1, 1, Conv, [256, 3, 2]],
[[-1, 14], 1, Concat, [1]], # cat head P4
[-1, 3, C3, [512, False]], # 20 (P4/16-medium)
[-1, 1, Conv, [512, 3, 2]],
[[-1, 10], 1, Concat, [1]], # cat head P5
[-1, 3, C3, [1024, False]], # 23 (P5/32-large)
[[17, 20, 23], 1, Detect, [nc, anchors]], # Detect(P3, P4, P5)
]
Take the head row [[-1, 6], 1, Concat, [1]] as the reference. In parse_model(d, ch) it is unpacked as f, n, m, args = [[-1, 6], 1, Concat, [1]].
f means the output channel count becomes the sum of ch[-1] and ch[6]; index 6 is also added to the save list, and during forward the outputs of the layers in that list are kept;
m is the module class to instantiate, usually defined in common.py;
n is the network depth, assigned with max(round(n * depth_multiple), 1), i.e. how many times the m block is repeated, but here it is always 1;
args corresponds to [1], the dimension to concatenate along; dim=1 means the feature maps are stacked along the channel dimension, as illustrated below;
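For illustration, this is all Concat with dim=1 does at that row (shapes assumed for a 640x640 input to yolov5s):

import torch

x_prev = torch.randn(1, 256, 40, 40)      # output of the previous layer (after nn.Upsample)
x_p4   = torch.randn(1, 256, 40, 40)      # saved output of backbone layer 6 (P4)
out = torch.cat([x_prev, x_p4], dim=1)    # dim=1 stacks the channels
print(out.shape)                          # torch.Size([1, 512, 40, 40])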
Take the head row [[17, 20, 23], 1, Detect, [nc, anchors]] as the reference. In parse_model(d, ch) it is unpacked as f, n, m, args = [[17, 20, 23], 1, Detect, [nc, anchors]].
f lists the layers whose outputs feed this module; the corresponding channel counts are taken from layers 17, 20 and 23. You can locate a layer by counting the yaml rows from 0 starting at the backbone, e.g. 17 is the row [-1, 3, C3, [256, False]], # 17 (P3/8-small). At the same time, 17, 20 and 23 are added to the save list;
m is the module class to instantiate; Detect itself is defined in yolo.py rather than common.py;
n is the network depth, assigned with max(round(n * depth_multiple), 1), i.e. how many times the m block is repeated, but here it is always 1;
args corresponds to [nc, anchors], i.e. the class count nc and the three anchor lists; parse_model also appends the channel counts looked up via f to args, as the code below shows.
The final args is roughly [80, [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]], [128, 256, 512]]: 80 is the class count nc, [128, 256, 512] are the channels of the layers referenced by f, and the remaining values are the anchors;
elif m is Detect:
    args.append([ch[x] for x in f])
    if isinstance(args[1], int):  # number of anchors
        args[1] = [list(range(args[1] * 2))] * len(f)
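Reproducing the final Detect arguments for yolov5s (anchors copied from yolov5s.yaml; the channel values are the ones produced by the width_multiple = 0.5 build):

nc = 80
anchors = [[10, 13, 16, 30, 33, 23], [30, 61, 62, 45, 59, 119], [116, 90, 156, 198, 373, 326]]
f = [17, 20, 23]
ch = {17: 128, 20: 256, 23: 512}          # output channels recorded by parse_model for those rows
args = [nc, anchors]
args.append([ch[x] for x in f])           # -> [80, anchors, [128, 256, 512]]
print(args[0], args[2])                   # 80 [128, 256, 512]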
The YOLOv5 model wrapper class is shown below; the key points are annotated as comments in the code.
class Model(nn.Module):
    def __init__(self, cfg='yolov5s.yaml', ch=3, nc=None, anchors=None):  # model, input channels, number of classes
        super().__init__()
        if isinstance(cfg, dict):
            self.yaml = cfg  # model dict
        else:  # is *.yaml
            import yaml  # for torch hub
            self.yaml_file = Path(cfg).name
            with open(cfg, errors='ignore') as f:
                self.yaml = yaml.safe_load(f)  # model dict

        # Define model
        ch = self.yaml['ch'] = self.yaml.get('ch', ch)  # input channels
        if nc and nc != self.yaml['nc']:
            LOGGER.info(f"Overriding model.yaml nc={self.yaml['nc']} with nc={nc}")
            self.yaml['nc'] = nc  # override yaml value
        if anchors:
            LOGGER.info(f'Overriding model.yaml anchors with anchors={anchors}')
            self.yaml['anchors'] = round(anchors)  # override yaml value
        self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch])  # model, savelist
        self.names = [str(i) for i in range(self.yaml['nc'])]  # default names: class indices as strings
        self.inplace = self.yaml.get('inplace', True)

        # Build strides, anchors: the following configures the Detect module
        m = self.model[-1]  # Detect()
        if isinstance(m, Detect):
            s = 256  # 2x min stride
            m.inplace = self.inplace
            # derive the stride of each head by forwarding a dummy input torch.zeros(1, ch, s, s)
            m.stride = torch.tensor([s / x.shape[-2] for x in self.forward(torch.zeros(1, ch, s, s))])  # forward
            m.anchors /= m.stride.view(-1, 1, 1)  # rescale anchors to each feature level
            check_anchor_order(m)
            self.stride = m.stride  # [8, 16, 32]
            self._initialize_biases()  # only run once: initialize the Detect biases

        # Init weights, biases
        initialize_weights(self)
        self.info()
        LOGGER.info('')

    def forward(self, x, augment=False, profile=False, visualize=False):
        if augment:
            return self._forward_augment(x)  # augmented inference, None
        return self._forward_once(x, profile, visualize)  # single-scale inference, train
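A minimal sketch of building and exercising the assembled model (the yaml path and input size are assumptions; the stride values are the P3/P4/P5 strides discussed above):

import torch
from models.yolo import Model

model = Model('models/yolov5s.yaml', ch=3, nc=80)   # build from yaml only, no pretrained weights
model.eval()
with torch.no_grad():
    out = model(torch.zeros(1, 3, 640, 640))        # goes through _forward_once
print(model.stride)                                 # tensor([ 8., 16., 32.])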
The most important piece of the code above is the network-building call shown below, which I will walk through in the next section.
self.model, self.save = parse_model(deepcopy(self.yaml), ch=[ch]) # model, savelist
The other key piece is the network's forward pass, invoked by the call below, which I will focus on.
During training, the model runs through the following call:
return self._forward_once(x, profile, visualize)  # single-scale inference, train
m.f and m.i were introduced with the yaml above. The main thing to watch is y, which keeps the outputs of the layers listed in save; layers not in that list get a None placeholder. The annotated code is as follows:
def _forward_once(self, x, profile=False, visualize=False):
    y, dt = [], []  # outputs
    for m in self.model:
        # m.f decides what module m receives as input; a list such as [-1, 6] usually
        # means a Concat or Detect layer that needs specific feature maps
        if m.f != -1:  # if not from previous layer
            # when m.f is a list like [-1, 6], the comprehension replaces -1 with the previous
            # output x and index 6 with the saved output y[6]
            x = y[m.f] if isinstance(m.f, int) else [x if j == -1 else y[j] for j in m.f]  # from earlier layers
        if profile:
            self._profile_one_layer(m, x, dt)
        x = m(x)  # run
        # if m's index is in self.save (the save list produced by parse_model), keep its output
        # in y, otherwise append None as a placeholder; m.i is the row index from the yaml
        y.append(x if m.i in self.save else None)  # save output
        if visualize:
            feature_visualization(x, m.type, m.i, save_dir=visualize)
    return x
This function builds the network from the corresponding yaml model file and should be read together with that file, which was covered above.
Also pay attention to m_.i, m_.f, m_.type, m_.np = i, f, t, np, since these attributes are used by _forward_once above.
The walkthrough is again in the code comments; please read them against the source:
def parse_model(d, ch):  # model_dict, input_channels(3)
    LOGGER.info('\n%3s%18s%3s%10s %-40s%-30s' % ('', 'from', 'n', 'params', 'module', 'arguments'))
    anchors, nc, gd, gw = d['anchors'], d['nc'], d['depth_multiple'], d['width_multiple']
    na = (len(anchors[0]) // 2) if isinstance(anchors, list) else anchors  # number of anchors per grid point (3 here)
    no = na * (nc + 5)  # number of outputs = anchors * (classes + 5)

    # layers stores one built module per yaml row; at the end nn.Sequential(*layers) chains them into the model
    # c2 is the output channel count defined by each yaml row, scaled together with width_multiple
    layers, save, c2 = [], [], ch[-1]  # layers, savelist, ch out; ch holds the channel history, initially [3]
    for i, (f, n, m, args) in enumerate(d['backbone'] + d['head']):  # from, number, module, args
        # eval strips the quotes and evaluates the string as Python code
        # i is the running index of this layer, i.e. the row number of the yaml entry
        m = eval(m) if isinstance(m, str) else m  # eval strings
        for j, a in enumerate(args):
            try:
                args[j] = eval(a) if isinstance(a, str) else a  # eval strings
            except NameError:
                pass

        n = n_ = max(round(n * gd), 1) if n > 1 else n  # depth gain: final repeat count
        # per-module handling; also determines the output channels c2
        if m in [Conv, GhostConv, Bottleneck, GhostBottleneck, SPP, SPPF, DWConv, MixConv2d, Focus, CrossConv,
                 BottleneckCSP, C3, C3TR, C3SPP, C3Ghost]:  # is m one of the known modules?
            c1, c2 = ch[f], args[0]
            if c2 != no:  # if not output
                c2 = make_divisible(c2 * gw, 8)
            args = [c1, c2, *args[1:]]
            # for these modules, move the repeat count n into args
            if m in [BottleneckCSP, C3, C3TR, C3Ghost]:
                args.insert(2, n)  # number of repeats
                n = 1
        elif m is nn.BatchNorm2d:
            args = [ch[f]]
        elif m is Concat:
            c2 = sum([ch[x] for x in f])  # sum the concatenated channel counts, e.g. [[-1, 6], 1, Concat, [1]] adds ch[-1] and ch[6]
        elif m is Detect:
            args.append([ch[x] for x in f])
            if isinstance(args[1], int):  # number of anchors
                args[1] = [list(range(args[1] * 2))] * len(f)
        elif m is Contract:
            c2 = ch[f] * args[0] ** 2
        elif m is Expand:
            c2 = ch[f] // args[0] ** 2
        else:
            c2 = ch[f]

        m_ = nn.Sequential(*[m(*args) for _ in range(n)]) if n > 1 else m(*args)  # module
        t = str(m)[8:-2].replace('__main__.', '')  # module type
        np = sum([x.numel() for x in m_.parameters()])  # number of params
        m_.i, m_.f, m_.type, m_.np = i, f, t, np  # attach index, 'from' index, type, number params; used later in forward
        LOGGER.info('%3s%18s%3s%10.0f %-40s%-30s' % (i, f, n_, np, t, args))  # print
        save.extend(x % i for x in ([f] if isinstance(f, int) else f) if x != -1)  # append to savelist
        layers.append(m_)
        if i == 0:
            ch = []  # drop the initial input channel entry (3)
        ch.append(c2)  # record the output channels of every yaml row (Concat included)
    return nn.Sequential(*layers), sorted(save)
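A small sketch of calling parse_model directly to inspect the save list (assuming the yolov5s.yaml shown above):

import yaml
from copy import deepcopy
from models.yolo import parse_model

with open('models/yolov5s.yaml', errors='ignore') as f:
    d = yaml.safe_load(f)
model, save = parse_model(deepcopy(d), ch=[3])
print(save)   # [4, 6, 10, 14, 17, 20, 23]: rows whose outputs _forward_once keeps in y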