--------------------- PyTorch vs. NumPy ----------------------------
#########################################################
#########################################################
Converting data between NumPy and torch
Torch represents data as tensors
import numpy as np
import torch

np_data = np.arange(6).reshape((2, 3))
torch_data = torch.from_numpy(np_data)   # numpy array -> torch tensor
tensor2array = torch_data.numpy()        # torch tensor -> numpy array
print(
    '\nnumpy array:', np_data,           # [[0 1 2], [3 4 5]]
    '\ntorch tensor:', torch_data,       # 0 1 2 \n 3 4 5 [torch.LongTensor of size 2x3]
    '\ntensor to array:', tensor2array,  # [[0 1 2], [3 4 5]]
)
Output:
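One detail worth adding (my note, not from the original): torch.from_numpy() shares memory with the source array, so an in-place change to the numpy array shows up in the tensor. A minimal sketch:

import numpy as np
import torch

np_data = np.arange(6).reshape((2, 3))
torch_data = torch.from_numpy(np_data)  # shares memory with np_data

np_data[0, 0] = 100      # modify the numpy array in place
print(torch_data[0, 0])  # tensor(100) -- the tensor sees the change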
Absolute value, mean, and sine are written in basically the same way in both libraries
The data just needs to be converted to a tensor first: torch.FloatTensor(data)
# abs
data = [-1, -2, 1, 2]
tensor = torch.FloatTensor(data)   # 32-bit floating point
print(
    '\nabs',
    '\nnumpy: ', np.abs(data),     # [1 2 1 2]
    '\ntorch: ', torch.abs(tensor) # [1 2 1 2]
)

# sin
print(
    '\nsin',
    '\nnumpy: ', np.sin(data),     # [-0.84147098 -0.90929743  0.84147098  0.90929743]
    '\ntorch: ', torch.sin(tensor) # [-0.8415 -0.9093  0.8415  0.9093]
)

# mean
print(
    '\nmean',
    '\nnumpy: ', np.mean(data),     # 0.0
    '\ntorch: ', torch.mean(tensor) # 0.0
)

Matrix multiplication
On raw Python lists, NumPy can call np.matmul() directly
Or convert the raw data to an array with data = np.array(data) and use NumPy's other matrix-multiplication form, data.dot(data)
In torch, matrix multiplication is torch.mm(tensor, tensor)
# matrix multiplication
data = [[1, 2], [3, 4]]
tensor = torch.FloatTensor(data)  # 32-bit floating point

# correct method
print(
    '\nmatrix multiplication (matmul)',
    '\nnumpy: ', np.matmul(data, data),    # [[7, 10], [15, 22]]
    '\ntorch: ', torch.mm(tensor, tensor)  # [[7, 10], [15, 22]]
)

# incorrect method
data = np.array(data)
print(
    '\nmatrix multiplication (dot)',
    '\nnumpy: ', data.dot(data),     # [[7, 10], [15, 22]]
    '\ntorch: ', tensor.dot(tensor)  # old PyTorch flattened the tensor to [1, 2, 3, 4] and returned 30.0;
                                     # newer versions raise an error, since dot() only accepts 1-D tensors
)
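A side note of my own: torch.mm() only works on 2-D tensors. Newer PyTorch code usually uses torch.matmul() or the @ operator instead, which also handle batched inputs:

import torch

tensor = torch.FloatTensor([[1, 2], [3, 4]])
print(torch.matmul(tensor, tensor))  # [[7., 10.], [15., 22.]], same as torch.mm for 2-D inputs
print(tensor @ tensor)               # @ is shorthand for matmul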

-------------------------variable-----------------------------------------------
##################################################################
##################################################################
import torch
from torch.autograd import Variable

tensor = torch.FloatTensor([[1, 2], [3, 4]])
variable = Variable(tensor, requires_grad=True)  # if False, no gradient is computed for this node during backprop

print(tensor)
print(variable)

print(tensor * tensor)

t_out = torch.mean(tensor * tensor)
v_out = torch.mean(variable * variable)
print(t_out)
print(v_out)

v_out.backward()  # backpropagation from v_out
# v_out = 1/4 * sum(variable*variable)
# the gradients w.r.t the variable, d(v_out)/d(variable) = 1/4*2*variable = variable/2
print(variable.grad)

print(variable)               # this is data in Variable format
print(variable.data)          # this is data in tensor format
print(variable.data.numpy())  # numpy format: to get a numpy array out of a Variable, take .data (a tensor) first, then call .numpy()
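Since PyTorch 0.4, Variable is deprecated and plain tensors support autograd directly; the sketch below (my addition) is the modern equivalent of the code above:

import torch

x = torch.tensor([[1., 2.], [3., 4.]], requires_grad=True)
out = torch.mean(x * x)  # same as v_out above
out.backward()           # d(out)/dx = x/2
print(x.grad)            # tensor([[0.5000, 1.0000], [1.5000, 2.0000]])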

---------------------------activation-------------------------------------------
##################################################################
##################################################################
First generate some fake points with torch.linspace()
Then turn them into a numpy array with x.data.numpy()
because matplotlib needs numpy data for plotting
Then plot torch's built-in activation functions
Full program:
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt
from torch.autograd import Variable

# fake data
x = torch.linspace(-5, 5, 200)  # x data (tensor), shape=(200,)
x = Variable(x)
x_np = x.data.numpy()           # numpy array for plotting: matplotlib needs numpy data

# following are popular activation functions
y_relu = torch.relu(x).data.numpy()
y_sigmoid = torch.sigmoid(x).data.numpy()
y_tanh = torch.tanh(x).data.numpy()
y_softplus = F.softplus(x).data.numpy()  # there's no softplus in torch
# y_softmax = torch.softmax(x, dim=0).data.numpy()  # softmax is a special kind of activation function, it is about probability

# plt to visualize these activation functions
plt.figure(1, figsize=(8, 6))
plt.subplot(221)
plt.plot(x_np, y_relu, c='red', label='relu')
plt.ylim((-1, 5))
plt.legend(loc='best')

plt.subplot(222)
plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
plt.ylim((-0.2, 1.2))
plt.legend(loc='best')

plt.subplot(223)
plt.plot(x_np, y_tanh, c='red', label='tanh')
plt.ylim((-1.2, 1.2))
plt.legend(loc='best')

plt.subplot(224)
plt.plot(x_np, y_softplus, c='red', label='softplus')
plt.ylim((-0.2, 6))
plt.legend(loc='best')

plt.show()

Output:
---------------------------regression-------------------------------------------
###################################################################################################################################
First generate data with torch.linspace() to simulate a curve to fit
Then build a simple network structure as a class
Instantiate the network
Define the optimizer and loss function
Train
torch.unsqueeze() turns the raw 1-D data into 2-D, because torch's layers expect 2-D input (one sample per row); see the quick shape check below
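A small shape check (my own illustration) of what torch.unsqueeze() does:

import torch

x = torch.linspace(-1, 1, 5)    # shape (5,): 1-D
x2 = torch.unsqueeze(x, dim=1)  # shape (5, 1): 2-D, one sample per row
print(x.shape, x2.shape)        # torch.Size([5]) torch.Size([5, 1])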
The Net class follows a fixed pattern; just memorize it:
class Net(torch.nn.Module):  # inherits from torch.nn.Module
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer; torch.nn.Linear returns a callable module
        self.predict = torch.nn.Linear(n_hidden, n_output)  # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))  # activation function for hidden layer
        x = self.predict(x)         # linear output
        return x

optimizer.zero_grad()  # clear the gradients from the previous step
loss.backward()        # backpropagate, computing the gradients
optimizer.step()       # apply the gradients to the parameters
Full program:
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# torch.manual_seed(1)  # reproducible

x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1); 1-D -> 2-D, since torch layers expect 2-D input
y = x.pow(2) + 0.2*torch.rand(x.size())                 # noisy y data (tensor), shape=(100, 1)

# torch can only train on Variable, so convert them to Variable
# The code below is deprecated in PyTorch 0.4. Now, autograd directly supports tensors
# x, y = Variable(x), Variable(y)

# plt.scatter(x.data.numpy(), y.data.numpy())  # scatter plot of the raw data
# plt.show()


class Net(torch.nn.Module):  # inherits from torch.nn.Module
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer; torch.nn.Linear returns a callable module
        self.predict = torch.nn.Linear(n_hidden, n_output)  # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))  # activation function for hidden layer
        x = self.predict(x)         # linear output
        return x


net = Net(n_feature=1, n_hidden=10, n_output=1)  # define the network
print(net)  # prints the network architecture

optimizer = torch.optim.SGD(net.parameters(), lr=0.2)
loss_func = torch.nn.MSELoss()  # mean squared error; sufficient for regression

plt.ion()  # interactive plotting

for t in range(200):
    prediction = net(x)  # input x and predict based on x; calling net(x) invokes forward()

    loss = loss_func(prediction, y)  # must be (1. nn output, 2. target)

    optimizer.zero_grad()  # clear gradients for next train
    loss.backward()        # backpropagation, compute gradients
    optimizer.step()       # apply gradients

    if t % 5 == 0:
        # plot and show learning process
        plt.cla()
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.text(0.5, 0, 'Loss=%.4f' % loss.data.numpy(), fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

Output:
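After training, the net can be called directly on new inputs; a usage sketch (my addition, input value chosen arbitrarily):

new_x = torch.tensor([[0.5]])  # shape (1, 1), matching the training data
print(net(new_x))              # should be close to 0.5**2 = 0.25 once training has converged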
---------------------------classification-----------------------------------------
####################################################################################################################################
Merge the data with torch.cat(). In torch, inputs x default to the FloatTensor type
and labels y to the LongTensor type:
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)
y = torch.cat((y0, y1), ).type(torch.LongTensor)
For regression, MSE (mean squared error) is enough; for classification, use the CrossEntropyLoss loss function.
optimizer.zero_grad()  # clear the leftover parameter updates from the previous step
loss.backward()        # backpropagate the error, computing the parameter updates
optimizer.step()       # apply the parameter updates to the parameters of net
torch.max(out, 1)[1]
# torch.max(input, dim, keepdim=False, out=None) -> (Tensor, LongTensor): returns the maximum values along dimension dim together with their indices
# torch.max(out, 1) returns, for each row, the largest element and its column index: (values, indices)
# the 1 is the dimension; the [1] picks the second item of that pair, i.e. the indices
# the loss function here is CrossEntropyLoss, which applies softmax internally, so out holds raw class scores
# after concatenation, x holds the 100 points from x0 followed by the 100 points from x1, and y holds the matching labels: 0 for x0, 1 for x1
# so torch.max(out, 1)[1] gives each point's most likely class index, 0 or 1, which lines up with the labels (see the standalone example after this block)
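A small standalone example (my own, not from the original) of what torch.max(out, 1)[1] returns:

import torch

out = torch.tensor([[0.9, 0.1],
                    [0.2, 0.8]])     # two rows of class scores
values, indices = torch.max(out, 1)  # maximum along dim 1, i.e. per row
print(values)   # tensor([0.9000, 0.8000])
print(indices)  # tensor([0, 1]) -- the predicted class for each row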
Full program:
import torch
import torch.nn.functional as F
import matplotlib.pyplot as plt

# torch.manual_seed(1)  # reproducible

# make fake data
n_data = torch.ones(100, 2)      # base shape of the data
x0 = torch.normal(2*n_data, 1)   # class0 x data (tensor), shape=(100, 2), normally distributed
y0 = torch.zeros(100)            # class0 y data (tensor), shape=(100,); labels for x0, all 0
x1 = torch.normal(-2*n_data, 1)  # class1 x data (tensor), shape=(100, 2), normally distributed
y1 = torch.ones(100)             # class1 y data (tensor), shape=(100,); labels for x1, all 1
# note: x, y must have exactly the data types below (torch.cat merges the data)
# concatenate along dim 0: the 100 rows of x0 are stacked on top of the 100 rows of x1; each sample has 2 coordinates, hence n_feature=2 below
x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # shape (200, 2); FloatTensor = 32-bit floating, the default type for torch inputs
y = torch.cat((y0, y1), ).type(torch.LongTensor)    # shape (200,); LongTensor = 64-bit integer, the default type for torch labels

# The code below is deprecated in PyTorch 0.4. Now, autograd directly supports tensors
# x, y = Variable(x), Variable(y)

# plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
# plt.show()


class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer
        self.out = torch.nn.Linear(n_hidden, n_output)      # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))  # activation function for hidden layer
        x = self.out(x)
        return x

net = Net(n_feature=2, n_hidden=10, n_output=2)  # define the network
print(net)  # net architecture

optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
loss_func = torch.nn.CrossEntropyLoss()  # the target label is NOT one-hotted; use MSE for regression, CrossEntropyLoss for classification

plt.ion()  # interactive plotting

for t in range(100):
    out = net(x)              # input x and predict based on x
    loss = loss_func(out, y)  # must be (1. nn output, 2. target), the target label is NOT one-hotted

    optimizer.zero_grad()  # clear gradients for next train
    loss.backward()        # backpropagation, compute gradients
    optimizer.step()       # apply gradients

    if t % 2 == 0:
        # plot and show learning process
        plt.cla()
        prediction = torch.max(out, 1)[1]  # index of the max score in each row = predicted class (see the explanation above)
        pred_y = prediction.data.numpy()
        target_y = y.data.numpy()
        plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
        accuracy = float((pred_y == target_y).astype(int).sum()) / float(target_y.size)  # fraction of predictions matching the targets
        plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
        plt.pause(0.1)

plt.ioff()
plt.show()

Output:
-------------------------- Fast network building ---------------------------------------------
####################################################################################################################################
F.relu() is a function
torch.nn.ReLU() used inside torch.nn.Sequential is a class (a module)
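Both forms compute the same thing; a quick sketch (my own) showing the functional call and the module class side by side:

import torch
import torch.nn.functional as F

x = torch.tensor([-1.0, 0.0, 2.0])
print(F.relu(x))           # functional form: tensor([0., 0., 2.])
print(torch.nn.ReLU()(x))  # module form: instantiate the class, then call it like a function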
Full program:
import torch
import torch.nn.functional as F

# replace the following class code with an easy sequential network
class Net(torch.nn.Module):
    def __init__(self, n_feature, n_hidden, n_output):
        super(Net, self).__init__()
        self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer
        self.predict = torch.nn.Linear(n_hidden, n_output)  # output layer

    def forward(self, x):
        x = F.relu(self.hidden(x))  # activation function for hidden layer
        x = self.predict(x)         # linear output
        return x

net1 = Net(1, 10, 1)

# easy and fast way to build your network
net2 = torch.nn.Sequential(
    torch.nn.Linear(1, 10),
    torch.nn.ReLU(),
    torch.nn.Linear(10, 1)
)

print(net1)  # net1 architecture
print(net2)  # net2 architecture

Output: