赞
踩
pytorch实现一元线性回归,代码如下:
# PyTorch univariate linear regression: fit y = 2x + 1 from noisy samples.
import numpy as np
import matplotlib.pyplot as plt
import torch
from torch import nn, optim

# --- Dataset: 100 points on y = 2x + 1 with small Gaussian noise ---
x_data = np.random.rand(100)
noise = np.random.normal(0, 0.01, x_data.shape)  # observation noise
y_data = x_data * 2 + noise + 1

# Visualize the raw data.
plt.scatter(x_data, y_data)
plt.rcParams['font.sans-serif'] = 'SimHei'  # font that can render Chinese labels
plt.title('线性回归')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim([0, 1.0])
plt.ylim([0, 3.0])
plt.grid()
plt.show()

# Reshape to (N, 1) column vectors as expected by nn.Linear.
# NOTE: torch.autograd.Variable is deprecated since PyTorch 0.4 —
# plain tensors carry autograd state, so no wrapper is needed.
inputs = torch.FloatTensor(x_data.reshape(-1, 1))
target = torch.FloatTensor(y_data.reshape(-1, 1))
print(inputs)
print(target)


class LinearRegression(nn.Module):
    """A single fully-connected layer modeling y = w*x + b."""

    def __init__(self):
        super(LinearRegression, self).__init__()
        self.fc = nn.Linear(1, 1)

    def forward(self, x):
        return self.fc(x)


model = LinearRegression()
mse_loss = nn.MSELoss()  # mean squared error loss
optimizer = optim.SGD(model.parameters(), lr=0.01)  # stochastic gradient descent


def train():
    """Run 20001 full-batch SGD steps, logging the loss every 100 steps."""
    for i in range(20001):
        out = model(inputs)           # forward pass
        loss = mse_loss(out, target)  # compute loss
        optimizer.zero_grad()         # clear accumulated gradients
        loss.backward()               # back-propagate gradients
        optimizer.step()              # update weights
        if i % 100 == 0:
            print(i, loss.item(), sep='\t')


train()

# Plot the fitted line over the raw data.
y_predict = model(inputs)
plt.scatter(x_data, y_data)
plt.plot(x_data, y_predict.data.numpy(), 'r-', lw=3)
plt.title('神经网络拟合线性回归')
plt.xlabel('x')
plt.ylabel('y')
plt.xlim([0, 1.0])
plt.ylim([0, 3.0])
plt.grid()
plt.show()
效果如图:
pytorch实现多元线性回归(以波士顿房价数据集为例),代码如下:
# PyTorch multivariate linear regression on the Boston housing dataset.
import torch
from torch import nn, optim
# NOTE(review): load_boston was deprecated in scikit-learn 1.0 and REMOVED in
# 1.2; on newer versions this import fails and the data must be obtained from
# an alternative source.
from sklearn.datasets import load_boston

# --- Dataset: 506 samples with 13 features each ---
boston_dataset = load_boston()
print(boston_dataset['data'].shape)    # (506,13)
print(boston_dataset['target'].shape)  # (506,)
x_data = boston_dataset['data']
y_data = boston_dataset['target']

# Targets reshaped to (N, 1) so they align with the model's output shape.
# (torch.autograd.Variable is deprecated; plain tensors suffice.)
inputs = torch.FloatTensor(x_data)
target = torch.FloatTensor(y_data.reshape(-1, 1))


class LinearRegression(nn.Module):
    """One fully-connected layer mapping 13 features to a single value."""

    def __init__(self):
        super(LinearRegression, self).__init__()
        self.layer1 = nn.Linear(13, 1)

    def forward(self, x):
        return self.layer1(x)


model = LinearRegression()
mse_loss = nn.MSELoss()  # mean squared error loss
optimizer = optim.Adam(model.parameters(), lr=0.003)


def train():
    """Run 1001 full-batch Adam steps, logging the loss every 20 steps."""
    for i in range(1001):
        out = model(inputs)           # forward pass
        loss = mse_loss(out, target)  # compute loss
        optimizer.zero_grad()         # clear accumulated gradients
        loss.backward()               # compute gradients
        optimizer.step()              # update weights
        if i % 20 == 0:
            print(i, loss.item(), sep='\t')


train()

# Inspect the learned weight vector and bias.
for parameter in model.parameters():
    print(parameter)
效果如下:
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。