赞
踩
Pytorch是一个基于Numpy的科学计算包
pip install torch torchvision torchaudio
无初始化数据
x = torch.empty(5, 3)  # uninitialized 5x3 tensor: values are whatever was in memory
# 结果
tensor([[-4.6138e+20, 5.8574e-43, 0.0000e+00],
[ 0.0000e+00, -4.6138e+20, 5.8574e-43],
[-4.6138e+20, 5.8574e-43, -4.6138e+20],
[ 5.8574e-43, -4.6138e+20, 5.8574e-43],
[-4.6138e+20, 5.8574e-43, -4.6138e+20]])
有初始化数据
x1 = torch.rand(5, 3)  # initialized: uniform random values in [0, 1)
# 结果
tensor([[0.7834, 0.1161, 0.6895],
[0.6038, 0.3004, 0.2812],
[0.1132, 0.9600, 0.8586],
[0.6652, 0.4563, 0.5422],
[0.7631, 0.2071, 0.1515]])
对比有无初始化矩阵:
全零矩阵/全一矩阵
# all-zeros tensor, dtype long
x2 = torch.zeros(5, 3, dtype=torch.long)
# all-ones tensor, dtype double
x = torch.ones(5, 3, dtype=torch.double)
通过数据创建
x = torch.tensor([2.5, 3.5])  # build a tensor directly from Python data
通过已有张量创建
# random data, same shape as x (dtype overridden to float32)
y = torch.randn_like(x, dtype=torch.float32)
# inspect the shape
y.size()
加法操作
# Element-wise addition: all four forms below compute x + y.
x + y
# Functional form.
torch.add(x, y)
# BUG FIX: out= requires a pre-allocated destination tensor; the original
# passed an undefined name `result`.
result = torch.empty_like(y)
torch.add(x, y, out=result)
# In-place form: the trailing underscore mutates y.
y.add_(x)
切片
print(x[:, 1])  # slice: column 1 of every row
改变张量形状
# total element count is fixed; -1 tells view() to infer that dimension
x = torch.randn(4, 4)
y = x.view(16)
z = x.view(-1, 8)
print(x.size(), y.size(), z.size())
item取出
# item() extracts the Python scalar from a one-element tensor
x = torch.randn(1)
print(x)
print(x.item())
Tensor与ndarray转换
# tensor.numpy(): Tensor -> ndarray (shares underlying memory on CPU)
a = torch.ones(5)
print(a.numpy())
# from_numpy(): ndarray -> Tensor (also shares memory)
a = np.ones(5)
b = torch.from_numpy(a)
print(b)
Cuda Tensor
Tensors可以用.to()方法移动到任意设备上
# NOTE(review): torch.device("cuda") fails at use-time without a CUDA GPU;
# normally guarded with torch.cuda.is_available()
device = torch.device("cuda")
x.to(device)
x.to("cpu")
# requires_grad=True makes autograd record every operation on x
x = torch.ones(2, 2, requires_grad=True)
print(x)
y = x + 2
print(y)
# x is a user-created leaf, so its grad_fn is None; y was produced by an op
print(x.grad_fn)
print(y.grad_fn)
在Pytorch中,反向传播是依靠.backward()实现的
x = torch.ones(2, 2, requires_grad=True)
y = x + 2
z = y * y * 3
out = z.mean()
print(out)
# backpropagate from the scalar; gradients accumulate into x.grad
out.backward()
print(x.grad)
# Imports for the network definition.
import torch
import torch.nn as nn
import torch.nn.functional as F


class Net(nn.Module):
    """LeNet-style CNN for single-channel 32x32 inputs, 10 output classes."""

    def __init__(self):
        super(Net, self).__init__()
        # conv layer 1: in_channels=1, out_channels=6, 3x3 kernel
        self.conv1 = nn.Conv2d(1, 6, 3)
        # conv layer 2: in_channels=6, out_channels=16, 3x3 kernel
        self.conv2 = nn.Conv2d(6, 16, 3)
        # fully-connected head; 16*6*6 matches a 32x32 input after two
        # (conv 3x3 -> 2x2 max-pool) stages: 32->30->15->13->6
        self.fc1 = nn.Linear(16 * 6 * 6, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        # BUG FIX: the original passed (2 * 2) == 4 as the pooling kernel,
        # which shrinks the features to 16*1*1 and breaks fc1 (16*6*6).
        x = F.max_pool2d(F.relu(self.conv1(x)), (2, 2))
        x = F.max_pool2d(F.relu(self.conv2(x)), (2, 2))
        # flatten every dimension except the batch dimension
        x = x.view(-1, self.num_flat_features(x))
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x

    def num_flat_features(self, x):
        """Return the number of features per sample (product of non-batch dims)."""
        size = x.size()[1:]
        num_features = 1
        for s in size:
            num_features *= s
        return num_features


net = Net()
print(net)
net.parameters()获取网络参数
# net.parameters() yields every learnable tensor in the model
params = list(net.parameters())
print(len(params))
print(params[0].size())  # conv1's weight tensor
注意torch.nn构建的神经网络只支持mini-batches的输入,不支持单一样本输入
假设图像尺寸32*32
# torch.nn expects mini-batches: shape (batch, channels, height, width)
input1 = torch.randn(1, 1, 32, 32)
out = net(input1)
print(out)
print(out.size())
损失函数的输入是一个输入的pair(output,target),然后计算出一个数值来评估output和target之间的差距大小
在torch.nn中有若干不同的损失函数可供使用,比如nn.MSELoss就是通过计算均方误差来评估输入和目标值之间的差距
应用nn.MSELoss计算损失
target = torch.randn(1, 10)  # dummy target, same shape as the network output
criterion = nn.MSELoss()
loss = criterion(out, target)
print(loss)
# walk the autograd graph backwards from the loss
print(loss.grad_fn)
print(loss.grad_fn.next_functions[0][0])
print(loss.grad_fn.next_functions[0][0].next_functions[0][0])
在Pytorch中执行反向传播非常简便,全部的操作就是loss.backward()
在执行反向传播之前,要将梯度清零,否则梯度会在不同的批次数据之间被累加
# zero the gradients first, otherwise they accumulate across batches
net.zero_grad()
print(net.conv1.bias.grad)
# backward pass: autograd fills .grad on every parameter
loss.backward()
print(net.conv1.bias.grad)
最简单的算法SGD(随机梯度下降)
Pytorch优化器
import torch.optim as optim

# BUG FIX: the original was collapsed onto one line where the inline `#`
# comment swallowed everything after the import, so none of this executed.
# Build the optimizer: plain SGD over all network parameters.
optimizer = optim.SGD(net.parameters(), lr=0.01)
# Zero the optimizer-managed gradients before the backward pass.
optimizer.zero_grad()
# Forward pass.
input1 = torch.randn(1, 1, 32, 32)
output = net(input1)
# Loss.
target = torch.randn(1, 10)
criterion = nn.MSELoss()
loss = criterion(output, target)
# Backward pass.
loss.backward()
# Apply the parameter update.
optimizer.step()
使用torchvision下载CIFAR10数据集
import torch
import torchvision
import torchvision.transforms as tf
import torch.nn as nn
import torch.nn.functional as F


def down_datasets():
    """Download CIFAR10 and build train/test DataLoaders.

    Returns:
        (train_loader, test_loader, classes). BUG FIX: the original
        (collapsed onto one line) computed these and silently dropped
        them; returning them makes the function usable by callers.
    """
    # normalize each RGB channel from [0, 1] to [-1, 1]
    transform = tf.Compose([tf.ToTensor(),
                            tf.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
    train_set = torchvision.datasets.CIFAR10(root='./data', train=True,
                                             download=True, transform=transform)
    train_loader = torch.utils.data.DataLoader(train_set, batch_size=4,
                                               shuffle=True, num_workers=2)
    test_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                            download=True, transform=transform)
    test_loader = torch.utils.data.DataLoader(test_set, batch_size=4,
                                              shuffle=False, num_workers=2)
    classes = ('plane', 'car', 'bird', 'cat', 'deer',
               'dog', 'frog', 'horse', 'ship', 'truck')
    return train_loader, test_loader, classes
定义卷积神经网络
class Net(nn.Module):
    """CNN for CIFAR10: (batch, 3, 32, 32) input -> 10 class scores.

    BUG FIX: the original was collapsed onto one line where an inline `#`
    comment swallowed the rest of the class body; reconstructed here.
    """

    def __init__(self):
        super(Net, self).__init__()
        # two conv layers with 5x5 kernels
        self.conv1 = nn.Conv2d(3, 6, 5)
        self.conv2 = nn.Conv2d(6, 16, 5)
        # shared 2x2 max-pool
        self.pool = nn.MaxPool2d(2, 2)
        # fully-connected head; 16*5*5 matches 32->28->14->10->5 spatial sizes
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        x = self.pool(F.relu(self.conv1(x)))
        x = self.pool(F.relu(self.conv2(x)))
        # flatten to (batch, 16*5*5)
        x = x.view(-1, 16 * 5 * 5)
        x = F.relu(self.fc1(x))
        x = F.relu(self.fc2(x))
        x = self.fc3(x)
        return x
定义损失函数
net = Net()
# cross-entropy loss for 10-way classification
criterion = nn.CrossEntropyLoss()
# SGD with momentum as the optimizer
optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
在训练集上训练模型
def train(net, train_loader, criterion, optimizer):
    """Train the model for 2 epochs over train_loader.

    :param net: model to train
    :param train_loader: iterable of (inputs, labels) batches
    :param criterion: loss function (e.g. nn.CrossEntropyLoss)
    :param optimizer: optimizer built over net.parameters()
    """
    print("开始训练")
    for epoch in range(2):
        running_loss = 0.0
        # iterate over mini-batches
        for i, data in enumerate(train_loader, 0):
            # unpack the batch: input images and integer class labels
            inputs, labels = data
            # zero gradients so they don't accumulate across batches
            optimizer.zero_grad()
            outputs = net(inputs)
            # BUG FIX: the original one-hot-encoded the labels first;
            # nn.CrossEntropyLoss takes integer class indices directly
            # (older torch versions reject float one-hot targets), and
            # the computed loss value is identical either way.
            loss = criterion(outputs, labels)
            # backward pass and parameter update
            loss.backward()
            optimizer.step()
            # running average printed every 200 batches
            running_loss += loss.item()
            if (i + 1) % 200 == 0:
                print('[%d, %5d] loss %.3f' % (epoch + 1, i + 1, running_loss / 200))
                running_loss = 0.0
    print("训练结束")


def save_model(net):
    """Save only the model's learned parameters (state dict) to disk."""
    PATH = './cifar_net.pth'
    torch.save(net.state_dict(), PATH)


def run():
    """Build model/loss/optimizer, train, and save the result."""
    net = Net()
    criterion = nn.CrossEntropyLoss()
    optimizer = optim.SGD(net.parameters(), lr=0.01, momentum=0.9)
    # NOTE(review): train_loader must exist at module level (e.g. produced
    # by down_datasets) — the original relied on this global too.
    train(net, train_loader, criterion, optimizer)
    save_model(net)
# persist only the learned parameters (state dict), not the whole module
torch.save(net.state_dict(),"./cifar_net.pth")
在测试集上测试模型
def test_model():
    """Predict one test batch and print true vs. predicted class names."""
    dataiter = iter(test_loader)
    # BUG FIX: the .next() method was removed from Python iterators /
    # DataLoader iterators; use the builtin next() instead.
    images, labels = next(dataiter)
    # ground-truth labels for the 4-image batch
    print(''.join('%5s ' % classes[labels[j]] for j in range(4)))
    # fresh model instance
    net = Net()
    # load the trained weights saved earlier
    net.load_state_dict(torch.load("./cifar_net.pth"))
    # forward pass
    outputs = net(images)
    # the class with the highest score wins
    _, predicted = torch.max(outputs, 1)
    # predicted labels
    print(''.join('%5s ' % classes[predicted[j]] for j in range(4)))
def batch_test_model():
    """Report the saved model's overall accuracy on the whole test set.

    BUG FIX: the original was collapsed onto one line where an inline `#`
    comment swallowed the function body; reconstructed here.
    """
    correct = 0
    total = 0
    # fresh model instance with the trained weights
    net = Net()
    net.load_state_dict(torch.load("./cifar_net.pth"))
    # no gradients needed for evaluation
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = net(images)
            # class with the highest score wins
            _, predicted = torch.max(outputs.data, 1)
            total += labels.size(0)
            correct += (predicted == labels).sum().item()
    print("准确率:%d %%" % (100 * correct / total))
在10个类别哪些表现更好
def batch_class_test_model():
    """Report the saved model's per-class accuracy on the whole test set."""
    # fresh model instance with the trained weights
    net = Net()
    net.load_state_dict(torch.load("./cifar_net.pth"))
    class_correct = list(0. for i in range(10))
    class_total = list(0. for i in range(10))
    with torch.no_grad():
        for data in test_loader:
            images, labels = data
            outputs = net(images)
            # class with the highest score wins
            _, predicted = torch.max(outputs.data, 1)
            c = (predicted == labels).squeeze()
            # BUG FIX: the original hard-coded range(4); the final batch
            # may be smaller than the loader's batch size.
            for i in range(labels.size(0)):
                label = labels[i]
                class_correct[label] += c[i].item()
                class_total[label] += 1
    for i in range(10):
        # guard against division by zero for a class absent from the test set
        if class_total[i]:
            print("%5s准确率:%d %%" % (classes[i], 100 * class_correct[i] / class_total[i]))
# BUG FIX: the original called torch.cuda.is_availabe() (typo); the
# correct API is torch.cuda.is_available().
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# move the model's parameters and buffers to the chosen device
net.to(device)
# tensors must be moved explicitly; .to() returns new tensors
inputs, labels = data[0].to(device), data[1].to(device)
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。