Chapter 1: ResNet
'''
Residual block
'''
import torch
from torch import nn


class Residual(nn.Module):
    def __init__(self, channels_in, channels_out, is_use_1x1conv=False, strides=1):
        super(Residual, self).__init__()
        self.conv2 = nn.Conv2d(channels_out, channels_out, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(channels_out)
        if is_use_1x1conv:
            # e.g. a 56x56 input with stride 2: (56-3+2*1)/2+1 = 28 (floor)
            self.conv1 = nn.Conv2d(channels_in, channels_out, kernel_size=3, stride=strides,
                                   padding=1)
            self.bn1 = nn.BatchNorm2d(channels_out)
            # 1x1 conv on the shortcut path: (56-1+0)/2+1 = 28 (floor)
            self.conv3 = nn.Conv2d(channels_in, channels_out, kernel_size=1, stride=strides)
        else:
            self.conv1 = nn.Conv2d(channels_in, channels_out, kernel_size=3, stride=strides, padding=1)
            self.bn1 = nn.BatchNorm2d(channels_out)
            self.conv3 = None
        self.relu = nn.ReLU()

    def forward(self, x):
        # conv -> BN -> ReLU ordering, as in the original ResNet design
        y = self.relu(self.bn1(self.conv1(x)))
        y = self.bn2(self.conv2(y))
        if self.conv3 is not None:
            x = self.conv3(x)  # project the shortcut so shapes match
        return self.relu(x + y)
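A quick shape check of the block (hypothetical usage, not part of the original listing): the identity variant preserves shapes, while is_use_1x1conv=True with strides=2 halves the spatial size and changes the channel count.

import torch

# identity variant: shapes are preserved
blk = Residual(64, 64)
print(blk(torch.randn(1, 64, 56, 56)).shape)   # torch.Size([1, 64, 56, 56])

# downsampling variant: channels 64 -> 128, spatial size 56 -> 28
blk = Residual(64, 128, is_use_1x1conv=True, strides=2)
print(blk(torch.randn(1, 64, 56, 56)).shape)   # torch.Size([1, 128, 28, 28])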
import torch
from torch import nn
from Residual import *
'''
Block module
'''


class ResNetBlock(nn.Module):
    def __init__(self, channels_in, channels_out, blocks_num, is_first_block=False):
        super(ResNetBlock, self).__init__()
        self.listLayers = nn.Sequential()
        # self.listLayers = []  # a plain Python list would not register the submodules
        for i in range(blocks_num):
            if i == 0 and not is_first_block:
                # first residual of a stage: halve the spatial size, switch channel count
                self.listLayers.append(Residual(channels_in, channels_out, is_use_1x1conv=True, strides=2))
            else:
                self.listLayers.append(Residual(channels_out, channels_out))

    def forward(self, x):
        for layer in self.listLayers:
            x = layer(x)
        return x
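A shape check for a whole stage (illustrative usage, not from the original post): a regular stage downsamples once at its entry, while the first stage keeps the resolution because is_first_block=True skips the strided block.

import torch

# stage with 2 residual blocks, 64 -> 128 channels, spatial size halved
stage = ResNetBlock(channels_in=64, channels_out=128, blocks_num=2)
print(stage(torch.randn(1, 64, 56, 56)).shape)   # torch.Size([1, 128, 28, 28])

# the first stage keeps the resolution
stage1 = ResNetBlock(channels_in=64, channels_out=64, blocks_num=2, is_first_block=True)
print(stage1(torch.randn(1, 64, 56, 56)).shape)  # torch.Size([1, 64, 56, 56])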
'''
Residual network
'''
import torch
from torch import nn
from torchsummary import summary
import torchvision
from ResNetBlock import *


class ResNet(nn.Module):
    def __init__(self, blocks_num):
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(1, 64, kernel_size=7, stride=2, padding=3)  # (224-7+2*3)/2+1 = 112
        self.bn1 = nn.BatchNorm2d(64)
        self.relu1 = nn.ReLU(inplace=True)
        self.maxpool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)  # (112-3+2*1)/2+1 = 56
        # -------------------------------------------------
        self.resnet_block1 = ResNetBlock(channels_in=64, channels_out=64, blocks_num=blocks_num[0], is_first_block=True)
        self.resnet_block2 = ResNetBlock(channels_in=64, channels_out=128, blocks_num=blocks_num[1])
        self.resnet_block3 = ResNetBlock(channels_in=128, channels_out=256, blocks_num=blocks_num[2])
        self.resnet_block4 = ResNetBlock(channels_in=256, channels_out=512, blocks_num=blocks_num[3])
        # -------------------------------------------------
        self.avgpool = nn.AdaptiveAvgPool2d((1, 1))  # global average pooling: any HxW down to 1x1
        self.fl = nn.Flatten()
        self.fc = nn.Linear(in_features=512, out_features=10)
        self.softmax = nn.Softmax(1)  # see the caveat below if training with CrossEntropyLoss

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu1(x)
        x = self.maxpool1(x)
        x = self.resnet_block1(x)
        x = self.resnet_block2(x)
        x = self.resnet_block3(x)
        x = self.resnet_block4(x)
        x = self.avgpool(x)
        x = self.fl(x)
        x = self.fc(x)
        x = self.softmax(x)
        return x


if __name__ == '__main__':
    mynet = ResNet([2, 2, 2, 2])  # ResNet-18 layout: four stages of 2 residual blocks
    batch_size = 1
    input = torch.ones((batch_size, 1, 224, 224))
    output = mynet(input)
    print(output.shape)
    print(output.argmax(1))
    device = torch.device("cuda")
    mynet = mynet.to(device)
    summary(mynet, (1, 224, 224))
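One caveat worth flagging before the training log: nn.CrossEntropyLoss applies log-softmax internally, so stacking an nn.Softmax layer on top of it caps how low the loss can go. With 10 classes, even a perfectly confident one-hot probability vector fed into CrossEntropyLoss bottoms out at -ln(e/(e+9)) ≈ 1.46, which matches the plateau visible in the log below. A minimal check:

import torch
from torch import nn

# a "perfect" prediction that has already been through softmax:
# class 0 gets probability 1, the rest 0
perfect = torch.zeros(1, 10)
perfect[0, 0] = 1.0
# CrossEntropyLoss treats its input as logits and softmaxes it again,
# so the loss cannot drop below -ln(e / (e + 9)) ≈ 1.4612
print(nn.CrossEntropyLoss()(perfect, torch.tensor([0])).item())

If training uses nn.CrossEntropyLoss, the usual fix is to end forward() at self.fc and apply softmax only when probabilities are needed at inference time.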
Only part of the dataset was used for training.
After 15 epochs the accuracy is 77%; after 19 epochs it reaches 86%.
Length of the training dataset: 500
Length of the test dataset: 500
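The training script itself is not part of the post. Purely as a sketch of the kind of loop that would produce the log below, assuming a 500-sample FashionMNIST subset, batch size 4 (which matches the iteration counts of roughly 125 steps per epoch in the log), SGD, and nn.CrossEntropyLoss; all of these are assumptions, not confirmed by the original:

import torch
from torch import nn
from torch.utils.data import DataLoader, Subset
import torchvision
from ResNet import *  # assumes the ResNet class above lives in ResNet.py

transform = torchvision.transforms.Compose([
    torchvision.transforms.Resize(224),  # FashionMNIST is 28x28; the stem expects 224x224
    torchvision.transforms.ToTensor(),
])
train_data = Subset(
    torchvision.datasets.FashionMNIST("data", train=True, download=True, transform=transform),
    range(500))
test_data = Subset(
    torchvision.datasets.FashionMNIST("data", train=False, download=True, transform=transform),
    range(500))
train_loader = DataLoader(train_data, batch_size=4, shuffle=True)  # 125 steps per epoch
test_loader = DataLoader(test_data, batch_size=4)

device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
mynet = ResNet([2, 2, 2, 2]).to(device)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(mynet.parameters(), lr=0.01)

step = 0
for epoch in range(20):
    print(f"-----------------Epoch {epoch + 1} training starts-------------------")
    mynet.train()
    for imgs, targets in train_loader:
        imgs, targets = imgs.to(device), targets.to(device)
        loss = criterion(mynet(imgs), targets)  # forward() still ends in Softmax, see caveat above
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        step += 1
        if step % 100 == 0:
            print(f"Iterations: {step}, loss: {loss.item()}")
    mynet.eval()
    total_loss, correct = 0.0, 0
    with torch.no_grad():
        for imgs, targets in test_loader:
            imgs, targets = imgs.to(device), targets.to(device)
            outputs = mynet(imgs)
            total_loss += criterion(outputs, targets).item()
            correct += (outputs.argmax(1) == targets).sum().item()
    print(f"Loss on the whole test set: {total_loss}")
    print(f"Accuracy on the whole test set: {correct / len(test_data)}")
    torch.save(mynet.state_dict(), f"resnet_epoch{epoch + 1}.pth")  # file name is an assumption
    print("Model saved")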
-----------------Epoch 1 training starts-------------------
Iterations: 100, loss: 2.3265645503997803
Loss on the whole test set: 277.19032287597656
Accuracy on the whole test set: 0.21800000965595245
Model saved
-----------------Epoch 2 training starts-------------------
Iterations: 200, loss: 2.349266290664673
Loss on the whole test set: 268.2144788503647
Accuracy on the whole test set: 0.28600001335144043
Model saved
-----------------Epoch 3 training starts-------------------
Iterations: 300, loss: 2.3015480041503906
Loss on the whole test set: 255.96847009658813
Accuracy on the whole test set: 0.43400001525878906
Model saved
-----------------Epoch 4 training starts-------------------
Iterations: 400, loss: 2.179131031036377
Iterations: 500, loss: 1.9270331859588623
Loss on the whole test set: 242.5845685005188
Accuracy on the whole test set: 0.5380000472068787
Model saved
-----------------Epoch 5 training starts-------------------
Iterations: 600, loss: 1.9587137699127197
Loss on the whole test set: 225.19620382785797
Accuracy on the whole test set: 0.7200000286102295
Model saved
-----------------Epoch 6 training starts-------------------
Iterations: 700, loss: 1.702500820159912
Loss on the whole test set: 218.9166295528412
Accuracy on the whole test set: 0.7320000529289246
Model saved
-----------------Epoch 7 training starts-------------------
Iterations: 800, loss: 1.5008697509765625
Loss on the whole test set: 214.1523060798645
Accuracy on the whole test set: 0.7760000228881836
Model saved
-----------------Epoch 8 training starts-------------------
Iterations: 900, loss: 1.7054476737976074
Iterations: 1000, loss: 1.7705843448638916
Loss on the whole test set: 213.46261644363403
Accuracy on the whole test set: 0.7740000486373901
Model saved
-----------------Epoch 9 training starts-------------------
Iterations: 1100, loss: 1.5386680364608765
Loss on the whole test set: 213.34011447429657
Accuracy on the whole test set: 0.7660000324249268
Model saved
-----------------Epoch 10 training starts-------------------
Iterations: 1200, loss: 1.56679105758667
Loss on the whole test set: 210.29412269592285
Accuracy on the whole test set: 0.7880000472068787
Model saved
-----------------Epoch 11 training starts-------------------
Iterations: 1300, loss: 1.4770478010177612
Loss on the whole test set: 213.10554945468903
Accuracy on the whole test set: 0.7580000162124634
Model saved
-----------------Epoch 12 training starts-------------------
Iterations: 1400, loss: 1.4702129364013672
Iterations: 1500, loss: 1.4863967895507812
Loss on the whole test set: 209.73208153247833
Accuracy on the whole test set: 0.7860000133514404
Model saved
-----------------Epoch 13 training starts-------------------
Iterations: 1600, loss: 1.4941433668136597
Loss on the whole test set: 209.60296595096588
Accuracy on the whole test set: 0.7860000133514404
Model saved
-----------------Epoch 14 training starts-------------------
Iterations: 1700, loss: 1.485489010810852
Loss on the whole test set: 208.73865723609924
Accuracy on the whole test set: 0.7920000553131104
Model saved
-----------------Epoch 15 training starts-------------------
Iterations: 1800, loss: 1.4636321067810059
Loss on the whole test set: 210.69604396820068
Accuracy on the whole test set: 0.7720000147819519
Model saved
-----------------Epoch 16 training starts-------------------
Iterations: 1900, loss: 1.471869945526123
Iterations: 2000, loss: 1.4679715633392334
Loss on the whole test set: 208.93938863277435
Accuracy on the whole test set: 0.7900000214576721
Model saved
-----------------Epoch 17 training starts-------------------
Iterations: 2100, loss: 1.7043009996414185
Loss on the whole test set: 207.3022402524948
Accuracy on the whole test set: 0.812000036239624
Model saved
-----------------Epoch 18 training starts-------------------
Iterations: 2200, loss: 1.7067052125930786
Loss on the whole test set: 205.76199901103973
Accuracy on the whole test set: 0.8320000171661377
Model saved
-----------------Epoch 19 training starts-------------------
Iterations: 2300, loss: 1.4999620914459229
Loss on the whole test set: 202.52664244174957
Accuracy on the whole test set: 0.862000048160553
Model saved
-----------------Epoch 20 training starts-------------------
Iterations: 2400, loss: 1.4625636339187622
Iterations: 2500, loss: 1.7677518129348755
Loss on the whole test set: 202.82109951972961
Accuracy on the whole test set: 0.8460000157356262
Model saved
Use the formula W2 = (W - F + 2P)/S + 1 together with the architecture diagram to build the model.
In TensorFlow a convolutional layer is declared by its number of kernels; in torch you declare the input and output channel counts.
Supplement:
Assume the input image is W x W, the kernel is F x F, the stride is S, and the padding is P (number of padded pixels).
Then the output size is W2 = (W - F + 2P)/S + 1.
Note the division in this formula: when it does not divide evenly, convolutions round down (floor).
The formula on the official torch site (for Conv2d; height shown, width is analogous) is:
H_out = floor((H_in + 2*padding - dilation*(kernel_size - 1) - 1) / stride + 1)
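As a quick sanity check of the formula against this network's early layers (illustrative code, not from the original post):

def conv_out(w, f, s, p):
    # W2 = floor((W - F + 2P) / S) + 1
    return (w - f + 2 * p) // s + 1

print(conv_out(224, 7, 2, 3))  # 112: the 7x7 / stride-2 stem conv
print(conv_out(112, 3, 2, 1))  # 56:  the 3x3 / stride-2 max pool
print(conv_out(56, 3, 2, 1))   # 28:  first strided residual conv
print(conv_out(56, 1, 2, 0))   # 28:  the matching 1x1 shortcut conv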
Inside the ResNetBlock class the sub-network is built with self.listLayers = nn.Sequential(), which guarantees that the layers created in __init__ are registered on the module. That way both the input and the weights end up on CUDA and their types stay consistent; a minimal repro of the failure mode follows the snippet.
class ResNetBlock(nn.Module):
    def __init__(self, channels_in, channels_out, blocks_num, is_first_block=False):
        super(ResNetBlock, self).__init__()
        self.listLayers = nn.Sequential()
        # self.listLayers = []  # this version raises an error (see below)
        for i in range(blocks_num):
            if i == 0 and not is_first_block:
                self.listLayers.append(Residual(channels_in, channels_out, is_use_1x1conv=True, strides=2))
            else:
                self.listLayers.append(Residual(channels_out, channels_out))

    def forward(self, x):
        for layer in self.listLayers:
            x = layer(x)
        return x
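To see why the plain list fails (a hypothetical minimal repro, not from the original post): submodules stored in a Python list are invisible to nn.Module, so .to(device) never moves their weights and the forward pass fails with a CPU/CUDA device mismatch. nn.Sequential (or nn.ModuleList) registers them properly:

import torch
from torch import nn

class BadBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = [nn.Linear(4, 4)]  # NOT registered as a submodule

class GoodBlock(nn.Module):
    def __init__(self):
        super().__init__()
        self.layers = nn.ModuleList([nn.Linear(4, 4)])  # registered

print(list(BadBlock().parameters()))         # [] -- .to(device) would skip these weights
print(len(list(GoodBlock().parameters())))   # 2 (weight and bias)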
Here 512 is the output dimension of the preceding layers (512 channels pooled down to 1x1), and 10 is for 10-way classification.
from torchsummary import summary
device=torch.device("cuda")
mynet=mynet.to(device)
summary(mynet,(1,224,224))
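If I read the torchsummary API correctly, summary() defaults to building its test input on CUDA, which is why the model is moved to the device first. On a CPU-only machine you would pass the device explicitly (hedged; check your installed version):

# assumption: torchsummary's signature is summary(model, input_size, batch_size=-1, device=...)
summary(mynet.to("cpu"), (1, 224, 224), device="cpu")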
Add the Anaconda path to the environment variables.
Activate the torch environment in the Anaconda PowerShell Prompt:
conda activate pytorch
Run TensorBoard:
tensorboard --logdir=<absolute path of the folder containing the generated TensorBoard logs>
Copy the link into a browser to view the plots TensorBoard draws.
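The post shows how to view the logs but not how they are written. A minimal sketch using torch.utils.tensorboard (the "logs" folder name is illustrative; the scalar value is the epoch-19 accuracy from the log above):

from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs")                 # this folder is what --logdir should point to
writer.add_scalar("test_accuracy", 0.862, 19)  # tag, value, global step
writer.close()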