赞
踩
学习视频:https://www.bilibili.com/video/BV1hE411t7RN?p=1,内含环境搭建
理论部分详见视频,本文只关注于代码实现,官方文档
import torch
from torch.nn import L1Loss  # L1Loss computes the mean absolute error
from torch import nn

# L1Loss needs floating-point tensors, hence the explicit dtype.
# The required input shape is listed under "Shape" in the official docs.
inputs = torch.tensor([1, 2, 3], dtype=torch.float32).reshape(1, 1, 1, 3)
targets = torch.tensor([1, 2, 5], dtype=torch.float32).reshape(1, 1, 1, 3)

# reduction="mean" averages the per-element errors; "sum" would total them.
loss = L1Loss(reduction="mean")
result = loss(inputs, targets)
print(result)
计算结果:
|1 - 1| = 0
|2 - 2| = 0
|3 - 5| = 2 (差为 -2,取了绝对值)
(0 + 0 + 2) / 3 = 0.6666...
输出结果
tensor(0.6667)
# CrossEntropyLoss expects logits of shape (N, C) and class indices of shape (N,).
x = torch.tensor([0.1, 0.2, 0.3]).reshape(1, 3)  # one sample, three class scores
y = torch.tensor([1])                            # the true class index
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)
tensor(1.1019)
通过损失函数,我们就可以通过反向传播来更新我们的参数
# Demo: run CIFAR10 images through a small CNN, compute the cross-entropy
# loss, and call backward() to fill in parameter gradients (no optimizer yet).
from torch import nn
import torchvision
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Load the CIFAR10 test split as tensors (download=False: files must already exist).
dataset = torchvision.datasets.CIFAR10(
    root="./CIFAR10_Dataset",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False,
)
dataloader = DataLoader(dataset, batch_size=1)


class Tudui(nn.Module):
    """Small CIFAR10 CNN: three conv+maxpool stages, flatten, two linear layers -> 10 logits."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss_cross = nn.CrossEntropyLoss()  # cross-entropy loss instance
tudui = Tudui()                     # the network instance

for data in dataloader:
    imgs, targets = data
    outputs = tudui(imgs)
    result_loss = loss_cross(outputs, targets)
    # The loss tensor carries backward(); calling it computes and stores the
    # gradient (.grad) of every parameter in the network.
    result_loss.backward()
通过优化器,把损失函数的值减小,官方文档
# Training-loop demo: an SGD optimizer drives the loss down over 20 epochs.
import torch
import torchvision  # FIX: was missing -- torchvision.datasets/transforms are used below
from torch import nn
from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

# Load the CIFAR10 test split as tensors (download=False: files must already exist).
dataset = torchvision.datasets.CIFAR10(
    root="./CIFAR10_Dataset",
    train=False,
    transform=torchvision.transforms.ToTensor(),
    download=False,
)
dataloader = DataLoader(dataset, batch_size=1)


class Tudui(nn.Module):
    """Small CIFAR10 CNN: three conv+maxpool stages, flatten, two linear layers -> 10 logits."""

    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10),
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss_cross = nn.CrossEntropyLoss()
tudui = Tudui()
# Hand the model's parameters to SGD (stochastic gradient descent); the
# optimizer automatically tracks everything it needs to update.
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)

for epoch in range(20):  # one full pass over the dataset per epoch, 20 passes total
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        result_loss = loss_cross(outputs, targets)  # compute the loss
        optim.zero_grad()       # clear the gradients from the previous step -- mandatory
        result_loss.backward()  # compute the gradient of every parameter
        optim.step()            # update the parameters from those gradients
        # FIX: accumulate the detached Python float via .item(); summing the
        # loss tensors themselves would keep every iteration's autograd graph
        # alive for the whole epoch (unbounded memory growth).
        running_loss = running_loss + result_loss.item()
    print(running_loss)
输出结果:
tensor(18668.8418, grad_fn=<AddBackward0>)
tensor(16092.8223, grad_fn=<AddBackward0>)
···
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。