
PyTorch Models in Practice: The Classic Perceptron

Implementation of the classic Perceptron by Frank Rosenblatt for binary classification (here: 0/1 class labels).

1. Data Processing

import numpy as np
import matplotlib.pyplot as plt
import torch
# The dataset holds 100 samples; X carries the two features, y the 0/1 labels
data = np.genfromtxt('data/perceptron_toydata.txt', delimiter='\t')
X, y = data[:, :2], data[:, 2]  # X: all rows, columns 0-1; y: all rows, column 2
y = y.astype(np.int64)  # convert the labels to integers
print(data)
print('Class label counts:', np.bincount(y))  # count the 0 and 1 labels
print('X.shape:', X.shape)
print('y.shape:', y.shape)
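The toy data file comes from the original tutorial. If perceptron_toydata.txt is not at hand, a minimal substitute such as the following generates a comparable linearly separable dataset (the cluster means and the seed are arbitrary assumptions, not the original data):

# Hypothetical substitute for the toy data file: 100 linearly separable
# 2-D samples, 50 per class (means and seed chosen arbitrarily)
rng = np.random.RandomState(42)
X = np.vstack([rng.randn(50, 2) + [-1.5, -1.5],  # class 0 cluster
               rng.randn(50, 2) + [1.5, 1.5]])   # class 1 cluster
y = np.array([0] * 50 + [1] * 50, dtype=np.int64)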
# Shuffle the data
shuffle_idx = np.arange(y.shape[0])
shuffle_rng = np.random.RandomState(123)
shuffle_rng.shuffle(shuffle_idx)
X, y = X[shuffle_idx], y[shuffle_idx]
# Split into 70 training and 30 test samples. X and y are already shuffled,
# so we slice them directly; indexing with shuffle_idx a second time (as some
# versions of this code do) would permute the rows again for no benefit.
X_train, X_test = X[:70], X[70:]
y_train, y_test = y[:70], y[70:]
# Standardize the features to zero mean and unit variance,
# using statistics computed on the training set only
mu, sigma = X_train.mean(axis=0), X_train.std(axis=0)
X_train = (X_train - mu) / sigma
X_test = (X_test - mu) / sigma
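A quick sanity check (not in the original post): after this step each training feature should have roughly zero mean and unit standard deviation:

# Sanity check: per-feature mean ~ 0 and std ~ 1 after standardization
print('train mean:', X_train.mean(axis=0))  # ~ [0. 0.]
print('train std: ', X_train.std(axis=0))   # ~ [1. 1.]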
# Visualize the training data
plt.scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], label='class 0', marker='o')
plt.scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], label='class 1', marker='s')
plt.xlabel('feature 1')
plt.ylabel('feature 2')
plt.legend()
plt.show()

Figure: the training data, one marker style per class.
2. Defining the Perceptron Model

# Run on the GPU if one is available, otherwise fall back to the CPU
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
def custom_where(cond, x_1, x_2):
    # Element-wise select: x_1 where cond is True, x_2 otherwise.
    # cond arrives as a bool tensor, so cast it to float first
    # (1 - bool_tensor raises an error in recent PyTorch versions).
    cond = cond.float()
    return (cond * x_1) + ((1 - cond) * x_2)
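As an aside, recent PyTorch versions ship torch.where, which performs the same element-wise selection. A minimal self-contained check (the sample values are made up):

# torch.where(cond, a, b) picks a where cond is True, b otherwise
linear = torch.tensor([[-0.5], [0.3], [1.2]])
predictions = torch.where(linear > 0.,
                          torch.ones_like(linear),
                          torch.zeros_like(linear))
print(predictions)  # tensor([[0.], [1.], [1.]])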
class Perceptron():
    """The classic Rosenblatt perceptron.

    num_features: number of input features (2 here, for inputs of shape (100, 2))
    weights:      shape (num_features, 1), so X @ weights has shape (100, 1)
    bias:         a single scalar offset
    """
    def __init__(self, num_features):
        self.num_features = num_features
        self.weights = torch.zeros(num_features, 1,
                                   dtype=torch.float32, device=device)
        self.bias = torch.zeros(1, dtype=torch.float32, device=device)

    def forward(self, x):
        # Forward pass: threshold the linear score at zero
        linear = torch.add(torch.mm(x, self.weights), self.bias)
        predictions = custom_where(linear > 0., 1, 0).float()
        return predictions

    def backward(self, x, y):
        # "Backward" pass: the prediction error drives the weight update
        predictions = self.forward(x)
        errors = y - predictions
        return errors

    def train(self, x, y, epochs):
        for e in range(epochs):

            for i in range(y.size()[0]):
                # use view because backward expects a matrix (i.e., a 2D tensor)
                errors = self.backward(x[i].view(1, self.num_features), y[i]).view(-1)
                self.weights += (errors * x[i]).view(self.num_features, 1)
                self.bias += errors

    def evaluate(self, x, y):
        # Fraction of samples whose prediction matches the label
        predictions = self.forward(x).view(-1)
        accuracy = torch.sum(predictions == y).float() / y.size()[0]
        return accuracy
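The learning rule in train is the classic perceptron update: for each sample, the weights move by (y - y_hat) * x and the bias by (y - y_hat), so correctly classified points (error 0) change nothing. A hand-run sketch on a single made-up sample:

# One perceptron update by hand, on a made-up sample (illustrative values only)
w = torch.zeros(2, 1)
b = torch.zeros(1)
x_i = torch.tensor([[1.0, -2.0]])  # one sample, shape (1, 2)
y_i = torch.tensor([1.0])          # its true label

pred = (x_i @ w + b > 0.).float()  # initial prediction: 0
error = (y_i - pred).view(-1)      # 1 - 0 = 1
w += (error * x_i).view(2, 1)      # w becomes [[1.], [-2.]]
b += error                         # b becomes [1.]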

3. Training the Model

# Instantiate the model
ppn = Perceptron(num_features=2)
# Convert the numpy arrays to tensors
X_train_tensor = torch.tensor(X_train, dtype=torch.float32, device=device)
y_train_tensor = torch.tensor(y_train, dtype=torch.float32, device=device)
# Train for 5 epochs
ppn.train(X_train_tensor, y_train_tensor, epochs=5)

print('Model parameters:')
print('  Weights: %s' % ppn.weights)
print('  Bias: %s' % ppn.bias)
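Though the original post skips this, the same evaluate method can report accuracy on the training set, which is a quick way to confirm the model converged:

# Optional check: accuracy on the data the model was trained on
train_acc = ppn.evaluate(X_train_tensor, y_train_tensor)
print('Train set accuracy: %.2f%%' % (train_acc * 100))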

4. Evaluating the Model

X_test_tensor = torch.tensor(X_test, dtype=torch.float32, device=device)
y_test_tensor = torch.tensor(y_test, dtype=torch.float32, device=device)

test_acc = ppn.evaluate(X_test_tensor, y_test_tensor)
print('Test set accuracy: %.2f%%' % (test_acc*100))
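To classify a new point with the trained model, standardize it with the training-set mu and sigma first, then call forward. A minimal sketch (the coordinates below are made up):

# Classify a hypothetical new sample; it must be scaled with the
# training-set statistics before being fed to the model
x_new = (np.array([[1.0, 2.0]]) - mu) / sigma
x_new_tensor = torch.tensor(x_new, dtype=torch.float32, device=device)
print('Predicted class:', int(ppn.forward(x_new_tensor).item()))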

5. Visualizing the Decision Boundary

The boundary is the line where the linear score is zero: w1*x1 + w2*x2 + b = 0, i.e. x2 = -(w1*x1 + b) / w2. The code below evaluates this line at x1 = -2 and x1 = 2 and draws it over the training and test scatter plots.

# Bring the parameters back to the CPU so matplotlib/numpy can consume them
w, b = ppn.weights.cpu(), ppn.bias.cpu()

# Two points on the decision boundary x2 = -(w1*x1 + b) / w2,
# evaluated at the left and right edges of the plot
x_min = -2
y_min = ( (-(w[0] * x_min) - b[0])
          / w[1] )

x_max = 2
y_max = ( (-(w[0] * x_max) - b[0])
          / w[1] )

# Left panel: training data; right panel: test data; both with the boundary line
fig, ax = plt.subplots(1, 2, sharex=True, figsize=(7, 3))

ax[0].plot([x_min, x_max], [y_min, y_max])
ax[1].plot([x_min, x_max], [y_min, y_max])

ax[0].scatter(X_train[y_train==0, 0], X_train[y_train==0, 1], label='class 0', marker='o')
ax[0].scatter(X_train[y_train==1, 0], X_train[y_train==1, 1], label='class 1', marker='s')

ax[1].scatter(X_test[y_test==0, 0], X_test[y_test==0, 1], label='class 0', marker='o')
ax[1].scatter(X_test[y_test==1, 0], X_test[y_test==1, 1], label='class 1', marker='s')

ax[1].legend(loc='upper left')
plt.show()

Figure: the learned decision boundary over the training (left) and test (right) data.
