PyTorch Learning Notes (1): Basic Modeling Workflow

Environment

  • OS: macOS Mojave
  • Python version: 3.7
  • PyTorch version: 1.4.0
  • IDE: PyCharm


0. Preface

This post records the basic workflow for implementing simple linear regression and logistic regression in PyTorch: getting the data, building the model, and training the model. The data and models become more complex later on, but this workflow stays the same.
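
Both examples below follow the same three-step loop. As a warm-up, here is a minimal, self-contained sketch of that skeleton; the toy data, model, and hyperparameters are illustrative only and are not used in the examples that follow:

import torch
from torch import nn

torch.manual_seed(0)
x = torch.randn(16, 3)  # toy inputs (16 samples, 3 features)
y = torch.randn(16, 1)  # toy targets

model = nn.Linear(3, 1)  # build the model
criterion = nn.MSELoss()  # choose a loss function
optimizer = torch.optim.SGD(model.parameters(), lr=0.01)  # choose an optimizer

for epoch in range(100):  # train the model
    out = model(x)  # forward pass
    loss = criterion(out, y)  # compute the loss
    loss.backward()  # backward pass (compute gradients)
    optimizer.step()  # update the parameters
    optimizer.zero_grad()  # zero the gradients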

1. Linear Regression

import torch
import matplotlib.pyplot as plt


class Config:
    """ 设置训练相关的参数 """
    def __init__(self):
        self.epochs = 5000  # 训练迭代数
        self.lr = 0.00003  # 学习率
        self.vis_interval = int(self.epochs / 10)  # 每隔这么多个 epoch 进行一次可视化作图


cfg = Config()

# ========== Step 1/3: get the data ==========

torch.manual_seed(0)

x = torch.linspace(-10, 10, steps=50)
y = 2. * x + 1. + torch.randn(50) * 3  # add some noise

# ========== Step 2/3: build the model ==========

torch.manual_seed(1)

w = torch.randn((1,), requires_grad=True)
b = torch.randn((1,), requires_grad=True)

# ========== Step 3/3: train the model ==========

for epoch in range(1, cfg.epochs + 1):
    # ---------- step 1/3: forward pass to compute the loss ----------
    out = x * w + b
    loss = (0.5 * (out - y) ** 2).mean()

    # ---------- step 2/3: backward pass (compute gradients) and update the parameters ----------
    loss.backward()
    w.data.sub_(cfg.lr * w.grad)  # gradient descent: w <- w - lr * grad
    b.data.sub_(cfg.lr * b.grad)

    # ---------- step 3/3: zero the gradients ----------
    w.grad.zero_()
    b.grad.zero_()

    # visualization
    if epoch % cfg.vis_interval == 0:
        plt.cla()  # clear the previous frame
        plt.scatter(x.numpy(), y.numpy())
        plt.plot(x.numpy(), out.detach().numpy(), 'r-')
        plt.title("epoch: {} / {} loss: {:.3f}\nw: {:.3f} b: {:.3f}".format(
            epoch, cfg.epochs, loss.item(), w.item(), b.item())
        )
        plt.show(block=False)  # non-blocking so training continues
        plt.pause(1.)

    print('epoch: {} / {} loss: {:.3f}'.format(epoch, cfg.epochs, loss.item()))


The visualization results are shown below (the line gradually fits the data):
[figure: scatter plot of the data with the fitted red line at successive epochs]
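
As a side note, the manual updates via w.data.sub_(...) above can be written with a torch.optim optimizer instead. A minimal sketch of the equivalent training step, reusing x, y, w, b, and cfg from the script above:

from torch.optim import SGD

optimizer = SGD([w, b], lr=cfg.lr)  # plain SGD, no momentum

for epoch in range(1, cfg.epochs + 1):
    out = x * w + b
    loss = (0.5 * (out - y) ** 2).mean()
    loss.backward()
    optimizer.step()       # replaces w.data.sub_(...) and b.data.sub_(...)
    optimizer.zero_grad()  # replaces w.grad.zero_() and b.grad.zero_()

With plain SGD (no momentum or weight decay), optimizer.step() performs exactly the update w <- w - lr * grad, so this behaves the same as the manual version.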

2. Logistic Regression

import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import torch
from torch.nn import Module, Linear, Sigmoid
from torch.nn import BCELoss
from torch.optim import SGD


class Config:
    """ 设置训练相关的参数 """
    def __init__(self):
        self.epochs = 1000  # 训练迭代数
        self.lr = 0.01  # 学习率
        self.vis_interval = int(self.epochs / 10)  # 每隔这么多个 epoch 进行一次可视化作图


cfg = Config()

# ========== Step 1/3: get the data ==========

torch.manual_seed(0)

num_features = 2

num_pos_samps, num_neg_samps = 40, 60
mean_pos, mean_neg = 4.0, 2.0
std_pos, std_neg = 1.5, 0.5
template_feat_pos = torch.ones(num_pos_samps, num_features)
template_feat_neg = torch.ones(num_neg_samps, num_features)

x_pos = torch.normal(mean_pos * template_feat_pos, std_pos)
x_neg = torch.normal(mean_neg * template_feat_neg, std_neg)
y_pos = torch.ones(num_pos_samps)
y_neg = torch.zeros(num_neg_samps)

x = torch.cat((x_pos, x_neg), dim=0)
y = torch.cat((y_pos, y_neg), dim=0)

# ========== Step 2/3: build the model ==========


class LogisticRegression(Module):
    def __init__(self, in_features):
        super(LogisticRegression, self).__init__()

        self.linear = Linear(in_features=in_features, out_features=1)
        self.sigmoid = Sigmoid()

    def forward(self, x):
        return self.sigmoid(self.linear(x))


model = LogisticRegression(in_features=2)

# ========== Step 3/3: train the model ==========

# step 1/3: choose a suitable loss function
criterion = BCELoss()  # binary cross-entropy loss

# step 2/3: choose a suitable optimizer (and learning-rate schedule)
optimizer = SGD(model.parameters(), lr=cfg.lr, momentum=0.9)  # stochastic gradient descent with momentum

# step 3/3: run the training loop
for epoch in range(1, cfg.epochs + 1):
    # ---------- step 1/3: forward pass to compute the loss ----------
    out = model(x)
    loss = criterion(out.squeeze(), y)

    # ---------- step 2/3: backward pass (compute gradients) and update the parameters ----------
    loss.backward()
    optimizer.step()

    # ---------- step 3/3: zero the gradients ----------
    optimizer.zero_grad()

    # print loss and accuracy for each epoch
    mask = out.ge(0.5).float().squeeze()  # classify with a threshold of 0.5
    correct = (mask == y).sum()  # number of correctly predicted samples
    accuracy = correct.item() / y.size(0)  # classification accuracy
    print('epoch: {} / {} loss: {:.3f} accuracy: {:.3f}'.format(epoch, cfg.epochs, loss.item(), accuracy))

    # visualization
    if epoch % cfg.vis_interval == 0:
        sns.set_style('whitegrid')
        plt.cla()  # clear the previous frame
        plt.scatter(x_neg.numpy()[:, 0], x_neg.numpy()[:, 1], c='g', label='negative')
        plt.scatter(x_pos.numpy()[:, 0], x_pos.numpy()[:, 1], c='b', label='positive')

        w0, w1 = model.linear.weight[0].tolist()
        plot_b = model.linear.bias.item()
        plot_x = np.arange(0, 7, 0.1)
        # decision boundary: w0 * x0 + w1 * x1 + b = 0  =>  x1 = -(w0 * x0 + b) / w1
        plot_y = (- w0 * plot_x - plot_b) / w1

        plt.plot(plot_x, plot_y, c='r')
        plt.legend()
        plt.title("epoch: {} / {} accuracy: {:.3f}\nw0: {:.3f} w1: {:.3f}".format(
            epoch, cfg.epochs, accuracy, w0, w1
        ))

        plt.show(block=False)  # non-blocking so training continues
        plt.pause(1.)

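A note on the plotted line: the sigmoid outputs exactly 0.5 when its input is 0, so the decision boundary is the set of points where the linear part of the model vanishes:

\sigma(w_0 x_0 + w_1 x_1 + b) = 0.5 \iff w_0 x_0 + w_1 x_1 + b = 0 \iff x_1 = \frac{-(w_0 x_0 + b)}{w_1}

This is exactly the plot_y formula used in the visualization code above.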

The visualization results are shown below (the boundary gradually separates the two classes):
[figure: scatter plot of the positive/negative samples with the learned decision boundary at successive epochs]
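
Once trained, the model can classify new points. A minimal sketch; the two test points below are made up for illustration (one near each class mean):

with torch.no_grad():  # no gradients needed for inference
    x_new = torch.tensor([[4.5, 4.0], [1.5, 2.0]])  # hypothetical test points
    probs = model(x_new).squeeze()  # predicted P(y = 1 | x)
    preds = (probs >= 0.5).long()  # threshold at 0.5, as in training
    print(probs, preds)  # the first point should lean positive, the second negative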
