# (scraped-page artifacts: "赞" / "踩" like–dislike buttons — not part of the code)
# Prepare training data: 100 samples of y = x^2 with x evenly spaced in [-1, 1].
# unsqueeze adds a trailing feature axis so both tensors are shaped (100, 1),
# matching what torch.nn.Linear expects as input.
x_data = torch.linspace(-1, 1, 100).unsqueeze(-1)
y_data = x_data ** 2
# Visualize the generated data with matplotlib (uncomment to inspect):
# plt.scatter(x_data.numpy(), y_data.numpy())
# plt.show()
class Neuro_net(torch.nn.Module):
    """A minimal feed-forward network: one ReLU hidden layer, then a linear output.

    Args:
        n_feature: number of input features.
        n_hidden_layer: number of hidden units.
        n_output: number of output features.
    """
    def __init__(self, n_feature, n_hidden_layer, n_output):
        super(Neuro_net, self).__init__()
        # hidden layer: n_feature -> n_hidden_layer
        self.hidden_layer = torch.nn.Linear(n_feature, n_hidden_layer)
        # output layer: n_hidden_layer -> n_output
        self.output_layer = torch.nn.Linear(n_hidden_layer, n_output)
    def forward(self, x_data):
        """Run the hidden layer, activate with ReLU, and return the linear output."""
        activated = self.hidden_layer(x_data).relu()
        return self.output_layer(activated)
# Hyper-parameters
num_feature = 1
num_hidden_layer = 10
num_output = 1
epoch = 500

# Instantiate the network
net = Neuro_net(num_feature, num_hidden_layer, num_output)
# print(net)  # uncomment to inspect the network architecture

# optimizer — the learning rate is configured here
optimizer = torch.optim.SGD(net.parameters(), lr=0.2)
# loss funaction
loss_funaction = torch.nn.MSELoss()

# train
for step in range(epoch):
    pridect_y = net(x_data)                    # feed the training data in, get predicted y
    loss = loss_funaction(pridect_y, y_data)   # compute the loss
    optimizer.zero_grad()                      # clear residual gradients from the previous step
    loss.backward()                            # back-propagate the error to compute gradients
    optimizer.step()                           # apply the parameter updates to net's parameters
import matplotlib.pyplot as plt

plt.ion()  # interactive plotting: the figure updates live during training

# Training loop with live visualization.
# NOTE(review): the bare `...` below is a placeholder in the original excerpt —
# the forward pass / loss computation / zero_grad lines are elided there.
for step in range(epoch):
    ...
    loss.backward()
    optimizer.step()
    if step % 5 == 0:
        print("已训练{}步 | loss:{}。".format(step, loss))
        plt.cla()  # clear the axes before redrawing this frame
        plt.scatter(x_data.numpy(), y_data.numpy())
        plt.plot(x_data.numpy(), pridect_y.data.numpy(), color="green", marker="o", linewidth=6, label="predict_line")
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 15, 'color': 'red'})
        plt.pause(0.1)  # brief pause so the GUI event loop can redraw

plt.ioff()  # leave interactive mode so the final show() blocks
plt.show()
import torch
import matplotlib.pyplot as plt


class Neuro_net(torch.nn.Module):
    """A minimal feed-forward network: one ReLU hidden layer, then a linear output.

    Args:
        n_feature: number of input features.
        n_hidden_layer: number of hidden units.
        n_output: number of output features.
    """
    def __init__(self, n_feature, n_hidden_layer, n_output):
        super(Neuro_net, self).__init__()
        self.hidden_layer = torch.nn.Linear(n_feature, n_hidden_layer)
        self.output_layer = torch.nn.Linear(n_hidden_layer, n_output)

    def forward(self, x_data):
        """Run the hidden layer, activate with ReLU, and return the linear output."""
        hidden_layer_x = torch.relu(self.hidden_layer(x_data))
        return self.output_layer(hidden_layer_x)


# Prepare training data: 100 samples of y = x^2 with x in [-1, 1], shaped (100, 1).
x_data = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=-1)
y_data = x_data.pow(2)
# Visualize the generated data (uncomment to inspect):
# plt.scatter(x_data.numpy(), y_data.numpy())
# plt.show()

# Hyper-parameters
num_feature = 1
num_hidden_layer = 10
num_output = 1
epoch = 500

net = Neuro_net(num_feature, num_hidden_layer, num_output)
# print(net)  # uncomment to inspect the architecture

optimizer = torch.optim.SGD(net.parameters(), lr=0.2)
loss_funaction = torch.nn.MSELoss()

plt.ion()  # interactive plotting: update the figure live during training
for step in range(epoch):
    pridect_y = net(x_data)                    # forward pass: predictions for the training inputs
    loss = loss_funaction(pridect_y, y_data)   # mean-squared error against the targets
    optimizer.zero_grad()                      # clear residual gradients from the previous step
    loss.backward()                            # back-propagate to compute gradients
    optimizer.step()                           # apply the parameter updates
    if step % 5 == 0:
        # FIX: print the scalar loss via .item() instead of the raw tensor repr.
        print("已训练{}步 | loss:{}。".format(step, loss.item()))
        plt.cla()
        plt.scatter(x_data.numpy(), y_data.numpy())
        # FIX: .detach() instead of the deprecated, autograd-unsafe .data access.
        plt.plot(x_data.numpy(), pridect_y.detach().numpy(), color="green", marker="o", linewidth=6, label="predict_line")
        plt.text(0.5, 0, 'Loss=%.4f' % loss.item(), fontdict={'size': 13, 'color': 'red'})
        plt.pause(0.1)  # let the GUI event loop redraw
plt.ioff()
plt.show()
# (scraped-page artifact: site copyright footer — Copyright © 2003-2013 www.wpsshop.cn)