赞
踩
目录
Step 4: run the command: python -m tensorboard.main --logdir="C:\Users\15535\Desktop\day6\train"
- import torch
- import torchvision.transforms
- from torch.utils.tensorboard import SummaryWriter
- from torchvision import datasets
- from torch.utils.data import DataLoader
- import torch.nn as nn
- from torch.nn import CrossEntropyLoss
-
-
# step 1: download the CIFAR-10 dataset (train and test splits),
# converting each PIL image to a float tensor in [0, 1].
train_data = datasets.CIFAR10('./data', train=True,
                              transform=torchvision.transforms.ToTensor(),
                              download=True)
test_data = datasets.CIFAR10('./data', train=False,
                             transform=torchvision.transforms.ToTensor(),
                             download=True)

print(len(train_data))  # 50000 training samples
print(len(test_data))   # 10000 test samples

# step 2: wrap the datasets in batched loaders.
# FIX: the training loader now shuffles each epoch — with shuffle=False,
# SGD sees the samples in the same fixed order every epoch, which hurts
# training. Test-set order does not matter, so it stays unshuffled.
train_data_loader = DataLoader(train_data, batch_size=64, shuffle=True)
test_data_loader = DataLoader(test_data, batch_size=64, shuffle=False)
-
- #step3.搭建网络模型
-
class My_Module(nn.Module):
    """CNN classifier for CIFAR-10 (10 output classes).

    Three stages of (5x5 conv, padding=2) followed by 2x2 max-pooling,
    then a flatten and two linear layers. Each conv preserves the spatial
    size (kernel 5, padding 2); each pool halves it.

    NOTE(review): there are no non-linear activations between layers —
    this matches the original tutorial-style architecture exactly.
    """

    def __init__(self):
        super(My_Module, self).__init__()
        # Input: (N, 3, 32, 32) -> (N, 32, 32, 32)
        self.conv1 = nn.Conv2d(in_channels=3, out_channels=32,
                               kernel_size=5, padding=2)
        # (N, 32, 32, 32) -> (N, 32, 16, 16)
        self.maxpool1 = nn.MaxPool2d(2)
        # (N, 32, 16, 16) -> (N, 32, 16, 16)
        self.conv2 = nn.Conv2d(in_channels=32, out_channels=32,
                               kernel_size=5, padding=2)
        # (N, 32, 16, 16) -> (N, 32, 8, 8)
        self.maxpool2 = nn.MaxPool2d(2)
        # (N, 32, 8, 8) -> (N, 64, 8, 8)
        self.conv3 = nn.Conv2d(in_channels=32, out_channels=64,
                               kernel_size=5, padding=2)
        # (N, 64, 8, 8) -> (N, 64, 4, 4)
        self.maxpool3 = nn.MaxPool2d(2)
        # (N, 64, 4, 4) -> (N, 1024) -> (N, 64) -> (N, 10)
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(in_features=1024, out_features=64)
        self.linear2 = nn.Linear(in_features=64, out_features=10)

    def forward(self, input):
        """Map a (N, 3, 32, 32) batch to (N, 10) raw class scores."""
        x = self.maxpool1(self.conv1(input))
        x = self.maxpool2(self.conv2(x))
        x = self.maxpool3(self.conv3(x))
        x = self.flatten(x)
        return self.linear2(self.linear1(x))
-
-
my_model = My_Module()
# print(my_model)

# Cross-entropy loss measures the gap between predicted scores and labels.
loss_func = CrossEntropyLoss()
# Optimizer: plain SGD over all model parameters; lr is the step size.
optim = torch.optim.SGD(my_model.parameters(), lr=0.001)

writer = SummaryWriter('./train')
################################# training #################################
for looptime in range(10):  # number of training epochs: 10
    print("------looptime:{}------".format(looptime + 1))
    num = 0
    loss_all = 0.0
    for data in train_data_loader:
        num += 1
        # forward pass
        imgs, targets = data
        output = my_model(imgs)
        loss_train = loss_func(output, targets)
        # FIX: accumulate the Python float (.item()) instead of the tensor —
        # summing loss tensors keeps every batch's autograd graph alive and
        # grows memory over the epoch.
        loss_all = loss_all + loss_train.item()
        if num % 100 == 0:
            print(loss_train)

        # backward pass: zero grads, backprop, parameter step
        optim.zero_grad()
        loss_train.backward()
        optim.step()

    # FIX: average the *training* loss over the number of training batches
    # (the original divided by len(test_data_loader)).
    loss_av = loss_all / len(train_data_loader)
    print(loss_av)
    writer.add_scalar('train_loss', loss_av, looptime)
    # FIX: writer.close() removed from inside the epoch loop — closing here
    # would invalidate the add_scalar calls of later epochs; the writer is
    # closed once after all epochs, below.

    ################################# validation #################################
    with torch.no_grad():
        accuracy = 0
        test_loss_all = 0.0
        for data in test_data_loader:
            imgs, targets = data
            output = my_model(imgs)
            loss_test = loss_func(output, targets)
            # output.argmax(1) -> predicted label per sample.
            # FIX: accumulate (+=) the correct-prediction count; the original
            # overwrote it each batch, reporting only the last batch.
            accuracy += (output.argmax(1) == targets).sum().item()
            test_loss_all = test_loss_all + loss_test.item()
        test_loss_av = test_loss_all / len(test_data_loader)
        # FIX: accuracy = correct samples / total samples — divide by the
        # dataset size, not the number of batches.
        acc_av = accuracy / len(test_data)

        print("测试集的平均损失{},测试集的准确率{}".format(test_loss_av, acc_av))
        writer.add_scalar('test_loss', test_loss_av, looptime)
        writer.add_scalar('acc', acc_av, looptime)

writer.close()
执行下面的操作自动复制
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。