First, prepare the CIFAR10 dataset and load it with DataLoader:

```python
import torchvision
from torch.utils.data import DataLoader

# Prepare the dataset
train_data = torchvision.datasets.CIFAR10(root="datasets", train=True,
                                          transform=torchvision.transforms.ToTensor(),
                                          download=False)

test_data = torchvision.datasets.CIFAR10(root="datasets", train=False,
                                         transform=torchvision.transforms.ToTensor(),
                                         download=False)

train_data_size = len(train_data)
test_data_size = len(test_data)
print("Size of the training set: {}".format(train_data_size))
print("Size of the test set: {}".format(test_data_size))

# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Create the network model (CYX is defined in model.py, shown next)
from model import *
cyx = CYX()
```
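Continuing the snippet above, a quick peek at one batch confirms the shape the network will receive (this check is a sketch, not part of the original script):

```python
imgs, targets = next(iter(train_dataloader))
print(imgs.shape)     # torch.Size([64, 3, 32, 32])
print(targets.shape)  # torch.Size([64])
```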
Next, build the network. It lives in a separate file, model.py:

```python
# Build the neural network
import torch
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear


class CYX(nn.Module):
    def __init__(self):
        super(CYX, self).__init__()
        self.model1 = Sequential(
            Conv2d(in_channels=3, out_channels=32, kernel_size=5,
                   padding=2, stride=1),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            # After three 2x2 poolings a 32x32 input becomes 4x4,
            # so the flattened size is 64 * 4 * 4 = 1024
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


# Verify the model is built correctly (runs only when model.py is executed directly)
if __name__ == '__main__':
    cyx = CYX()
    input = torch.ones((64, 3, 32, 32))
    output = cyx(input)
    print(output.shape)  # expected: torch.Size([64, 10])
```
Back in the training script, set up the loss function, the optimizer, and the bookkeeping counters:

```python
# Loss function
loss_fn = nn.CrossEntropyLoss()

# Optimizer
learning_rate = 0.01
optimizer = torch.optim.SGD(cyx.parameters(), lr=learning_rate)

# Number of training steps taken
total_train_step = 0
# Number of test rounds taken
total_test_step = 0
# Number of training epochs
epoch = 10
```
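A note on nn.CrossEntropyLoss, sketched with shapes matching one CIFAR10 batch: it expects raw logits of shape (N, 10) and integer class indices of shape (N,), and applies log-softmax internally, which is why CYX ends in a plain Linear(64, 10) with no softmax layer.

```python
import torch
from torch import nn

loss_fn = nn.CrossEntropyLoss()
logits = torch.randn(64, 10)             # raw scores, as CYX outputs them
targets = torch.randint(0, 10, (64,))    # one class index per image
print(loss_fn(logits, targets).item())   # mean loss over the batch
```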
Add a SummaryWriter so the training can be followed in TensorBoard:

```python
# Add TensorBoard logging
from torch.utils.tensorboard import SummaryWriter

writer = SummaryWriter("logs_train")
```
Then run the training and evaluation loop, saving the model after every epoch:

```python
for i in range(epoch):
    print("--------- Epoch {} training begins ---------".format(i + 1))

    # Training phase begins
    # train() only affects certain layers (e.g. BatchNorm, Dropout)
    cyx.train()
    for data in train_dataloader:
        imgs, targets = data
        outputs = cyx(imgs)
        loss = loss_fn(outputs, targets)
        # Optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        # Print only every 100 steps (keeps the output tidy)
        if total_train_step % 100 == 0:
            # .item() turns a tensor into a plain number, e.g. tensor(5) -> 5
            print("Training step: {}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Test phase begins
    # eval() only affects certain layers (e.g. BatchNorm, Dropout)
    cyx.eval()
    # Track the loss over the whole test set
    total_test_loss = 0
    total_accuracy = 0
    # Do not track gradients during evaluation
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = cyx(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # argmax(1) gives the predicted class per image; comparing with
            # targets and summing counts the correct predictions
            # (see the standalone example below)
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    torch.save(cyx, "cyx_cpu_{}.pth".format(i))
    # torch.save(cyx.state_dict(), "cyx_{}.pth".format(i))
    print("Model saved")
```
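The accuracy line deserves a closer look. A small standalone example of the argmax comparison (the values are illustrative):

```python
import torch

outputs = torch.tensor([[0.1, 0.9],   # predicted class: 1
                        [0.8, 0.2]])  # predicted class: 0
targets = torch.tensor([1, 1])

preds = outputs.argmax(1)               # tensor([1, 0])
print((preds == targets).sum().item())  # 1 correct prediction out of 2
```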
Details to note:
model.train() and model.eval() control whether layers such as Batch Normalization and Dropout run in training or evaluation mode; call train() at the start of the training phase and eval() at the start of the test phase.
Reference: 【Pytorch】model.train() 和 model.eval() 原理与用法
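A minimal sketch of the difference, using a bare Dropout layer (the layer is illustrative; the CYX network above contains neither BatchNorm nor Dropout, so train()/eval() have no visible effect on it):

```python
import torch
from torch import nn

layer = nn.Dropout(p=0.5)
x = torch.ones(1, 4)

layer.train()   # training mode: about half the values are zeroed, the rest scaled by 2
print(layer(x))

layer.eval()    # evaluation mode: dropout is disabled, input passes through unchanged
print(layer(x))
```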
.item() converts a single-element tensor into a plain Python number, e.g. tensor(5) -> 5.
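For example:

```python
import torch

t = torch.tensor(5)
print(t)         # tensor(5)
print(t.item())  # 5
```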
Reference: the with statement is suited to code that accesses a resource; it ensures the necessary "cleanup" runs and the resource is released whether or not an exception occurs during use, for example a file is closed automatically after use, or a lock in a thread is acquired and released automatically.
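A minimal illustration of that guarantee (the file name is hypothetical):

```python
# The file is closed when the block exits, even if an exception is raised inside
with open("example.txt", "w") as f:
    f.write("hello")

print(f.closed)  # True
```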
Since this is the test phase, gradients are not accumulated.
Reference: 【pytorch】 with torch.no_grad():用法详解
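A small sketch of what torch.no_grad() changes: operations inside the block do not build an autograd graph, which saves memory and computation during evaluation.

```python
import torch

x = torch.ones(3, requires_grad=True)

y = x * 2
print(y.requires_grad)   # True: y is tracked by autograd

with torch.no_grad():
    z = x * 2
print(z.requires_grad)   # False: no graph is built inside the block
```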
Close the writer when training finishes:

```python
writer.close()
```
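With the script running, the logged curves can be viewed by starting TensorBoard and pointing it at the same directory: `tensorboard --logdir=logs_train`.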
Finally, the complete training script (train.py) in one piece:

```python
import torch
import torchvision
from torch import nn
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

from model import *

# Prepare the dataset
train_data = torchvision.datasets.CIFAR10(root="datasets", train=True,
                                          transform=torchvision.transforms.ToTensor(),
                                          download=False)

test_data = torchvision.datasets.CIFAR10(root="datasets", train=False,
                                         transform=torchvision.transforms.ToTensor(),
                                         download=False)

train_data_size = len(train_data)
test_data_size = len(test_data)
print("Size of the training set: {}".format(train_data_size))
print("Size of the test set: {}".format(test_data_size))

# Load the datasets with DataLoader
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)

# Create the network model
cyx = CYX()

# Loss function
loss_fn = nn.CrossEntropyLoss()

# Optimizer
learning_rate = 0.01
optimizer = torch.optim.SGD(cyx.parameters(), lr=learning_rate)

# Training bookkeeping
# Number of training steps taken
total_train_step = 0
# Number of test rounds taken
total_test_step = 0
# Number of training epochs
epoch = 10

# Add TensorBoard logging
writer = SummaryWriter("logs_train")

for i in range(epoch):
    print("--------- Epoch {} training begins ---------".format(i + 1))

    # Training phase begins
    # train() only affects certain layers (e.g. BatchNorm, Dropout)
    cyx.train()
    for data in train_dataloader:
        imgs, targets = data
        outputs = cyx(imgs)
        loss = loss_fn(outputs, targets)
        # Optimizer step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        total_train_step = total_train_step + 1
        # Print only every 100 steps (keeps the output tidy)
        if total_train_step % 100 == 0:
            # .item() turns a tensor into a plain number, e.g. tensor(5) -> 5
            print("Training step: {}, Loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # Test phase begins
    # eval() only affects certain layers (e.g. BatchNorm, Dropout)
    cyx.eval()
    # Track the loss over the whole test set
    total_test_loss = 0
    total_accuracy = 0
    # Do not track gradients during evaluation
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            outputs = cyx(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss.item()
            # argmax(1) gives the predicted class per image; comparing with
            # targets and summing counts the correct predictions
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    torch.save(cyx, "cyx_cpu_{}.pth".format(i))
    # torch.save(cyx.state_dict(), "cyx_{}.pth".format(i))
    print("Model saved")

writer.close()
```
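torch.save(cyx, ...) serializes the whole module, while the commented-out alternative saves only the weights. A minimal sketch of loading each format back (file names follow the save calls above; the weights-only file exists only if the commented line was used):

```python
import torch
from model import CYX

# Format 1: whole model. The CYX class must be importable, and recent
# PyTorch versions may require weights_only=False to unpickle a full module.
model = torch.load("cyx_cpu_9.pth")

# Format 2: state dict only - build the model first, then load the weights
model2 = CYX()
model2.load_state_dict(torch.load("cyx_9.pth"))
model2.eval()
```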