
PyTorch: Some Small Code Snippets for Getting Started, Part 1 (from 莫烦)



1. Tensor & numpy & Variable

    import numpy as np
    import torch
    from torch.autograd import Variable

    # Converting between torch and numpy: .numpy() / torch.from_numpy()
    np_data = np.arange(6).reshape((2, 3))
    torch_data = torch.from_numpy(np_data)
    tensor2array = torch_data.numpy()
    print(
        '\nnumpy:', np_data,
        '\ntorch:', torch_data,
        '\ntensor2array:', tensor2array
    )

    # Operator: abs
    data = [-1, -2, 1, 2]
    tensor = torch.FloatTensor(data)  # 32-bit floating point
    print(
        '\nabs:',
        '\nnumpy:', np.abs(data),
        '\ntorch:', torch.abs(tensor)
    )

    # Operator: sin
    print(
        '\nsin:',
        '\nnumpy:', np.sin(data),
        '\ntorch:', torch.sin(tensor)
    )

    # Operator: mean
    print(
        '\nmean:',
        '\nnumpy:', np.mean(data),
        '\ntorch:', torch.mean(tensor)
    )

    # Matrix operations
    data = [[1, 2], [3, 4]]
    tensor = torch.FloatTensor(data)  # 32-bit floating point
    print(
        '\nmatrix multiply:',
        '\nnumpy:', np.matmul(data, data),
        '\ntorch:', torch.mm(tensor, tensor)
    )

    '''
    Tip:
    Another way to do matrix multiplication in numpy:
        data = np.array(data)
        print(data.dot(data))
    But doing the same thing in torch gives a different result:
        print('torch:', tensor.dot(tensor))
    The output is 30.0, because the inputs are flattened first:
    1*1 + 2*2 + 3*3 + 4*4 = 30
    (note: newer PyTorch releases only allow dot on 1-D tensors)
    '''

    # Variable: a plain tensor cannot back-propagate, but a Variable can
    var = Variable(tensor, requires_grad=True)
    print('tensor:\n', tensor)
    print('Variable:\n', var)
    t_out = torch.mean(tensor * tensor)  # mean of x^2
    v_out = torch.mean(var * var)
    print('tensor mean:\n', t_out)
    print('Variable mean:\n', v_out)
    v_out.backward()  # back-propagation
    print('variable grad:\n', var.grad)
    # v_out depends on var: v_out = 1/4 * sum(var * var)
    # so d(v_out)/d(var) = 1/4 * 2 * var = 1/2 * var
    # print(var.data)          # the tensor wrapped inside the Variable
    # print(var.data.numpy())  # convert via .data, var.numpy() would fail here
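
The comments above claim d(v_out)/d(var) = var / 2. A minimal check of that derivation (my own addition, using the same Variable-style API as the snippet):

    # Minimal sketch: verify d(v_out)/d(var) == var / 2 for the example above
    check = Variable(torch.FloatTensor([[1, 2], [3, 4]]), requires_grad=True)
    out = torch.mean(check * check)   # 1/4 * sum(check^2)
    out.backward()
    print(check.grad)                 # expected: [[0.5, 1.0], [1.5, 2.0]]
    print(check.data / 2)             # same values, confirming grad = var / 2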


2. Activation Functions

    import torch
    from torch.autograd import Variable
    import torch.nn.functional as F
    import matplotlib.pyplot as plt

    # Activation functions
    x = torch.linspace(-5, 5, 200)   # 200 points between -5 and 5
    x = Variable(x)
    x_np = x.data.numpy()            # matplotlib cannot read torch tensors, so convert to numpy
    y_relu = F.relu(x).data.numpy()
    y_sigmoid = F.sigmoid(x).data.numpy()
    y_tanh = F.tanh(x).data.numpy()
    y_softplus = F.softplus(x).data.numpy()
    # softmax produces class probabilities, so it does not make sense as a line plot

    plt.figure(1, figsize=(8, 6))
    plt.subplot(221)
    plt.plot(x_np, y_relu, c='red', label='relu')
    plt.ylim((-1, 5))
    plt.legend(loc='best')
    plt.subplot(222)
    plt.plot(x_np, y_sigmoid, c='red', label='sigmoid')
    plt.ylim((-0.2, 1.2))
    plt.legend(loc='best')
    plt.subplot(223)
    plt.plot(x_np, y_tanh, c='red', label='tanh')
    plt.ylim((-1.2, 1.2))
    plt.legend(loc='best')
    plt.subplot(224)
    plt.plot(x_np, y_softplus, c='red', label='softplus')
    plt.ylim((-0.2, 6))
    plt.legend(loc='best')
    plt.show()
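
Since softmax only makes sense as class probabilities rather than a curve, here is a minimal sketch (my own example, not from the original; very old PyTorch releases may not accept the dim argument) of what F.softmax returns for a small score vector:

    # Minimal sketch: softmax turns raw scores into probabilities that sum to 1
    scores = Variable(torch.FloatTensor([1.0, 2.0, 3.0]))
    probs = F.softmax(scores, dim=0)
    print(probs)        # roughly [0.09, 0.24, 0.67]
    print(probs.sum())  # 1.0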


3. Regression & Classification

    import torch
    from torch.autograd import Variable
    import torch.nn.functional as F
    import matplotlib.pyplot as plt

    # Regression
    x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
    # unsqueeze turns the 1-D linspace into a 2-D tensor, which is what torch expects
    y = x.pow(2) + 0.2 * torch.rand(x.size())  # noisy y data (tensor), shape=(100, 1); y = x^2 + noise
    # torch can only train on Variables, so convert them to Variables
    x, y = Variable(x), Variable(y)
    # plt.scatter(x.data.numpy(), y.data.numpy())  # scatter plot of the raw data
    # plt.show()

    # Define the neural network:
    class Net(torch.nn.Module):
        def __init__(self, n_feature, n_hidden, n_output):
            super(Net, self).__init__()  # call the parent constructor (standard step)
            self.hidden = torch.nn.Linear(n_feature, n_hidden)   # hidden layer
            self.predict = torch.nn.Linear(n_hidden, n_output)   # output layer

        # forward propagation: input -> Linear -> ReLU -> Linear -> output
        def forward(self, x):
            x = F.relu(self.hidden(x))   # activation function for the hidden layer
            x = self.predict(x)          # linear output
            return x

    # Instantiate the net:
    net = Net(n_feature=1, n_hidden=10, n_output=1)   # define the network
    print(net)                                        # net architecture

    # Optimize the net:
    optimizer = torch.optim.SGD(net.parameters(), lr=0.5)  # stochastic gradient descent
    loss_func = torch.nn.MSELoss()                          # loss function: mean squared error (MSE)

    plt.ion()  # interactive plotting
    for t in range(100):                 # 100 training steps
        prediction = net(x)              # input x and predict based on x
        loss = loss_func(prediction, y)  # must be (1. nn output, 2. target)

        optimizer.zero_grad()            # clear gradients for the next step
        loss.backward()                  # backpropagation, compute gradients
        optimizer.step()                 # apply gradients

        if t % 5 == 0:
            # plot and show the learning process
            plt.cla()
            plt.scatter(x.data.numpy(), y.data.numpy())
            plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
            plt.text(0.5, 0, 'Loss=%.4f' % loss.data[0], fontdict={'size': 20, 'color': 'red'})
            plt.pause(0.5)
    plt.ioff()
    plt.show()
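
After training, the net can be queried on new inputs like any Python callable. A minimal sketch (my own addition, assuming the trained net above is still in scope):

    # Minimal sketch: query the trained regression net on a few new points
    new_x = Variable(torch.unsqueeze(torch.FloatTensor([-0.5, 0.0, 0.5]), dim=1))
    pred = net(new_x)
    print(pred.data.numpy())  # roughly x^2 (the fit also absorbs the average +0.1 noise offset)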

    # Classification
    import torch
    from torch.autograd import Variable
    import torch.nn.functional as F
    import matplotlib.pyplot as plt

    torch.manual_seed(1)  # reproducible: fixes the seed of the random number generator

    # make fake data
    n_data = torch.ones(100, 2)
    # class 0:
    x0 = torch.normal(2 * n_data, 1)   # class0 x data (tensor), shape=(100, 2)
    y0 = torch.zeros(100)              # class0 y data (tensor), shape=(100,)
    # class 1:
    x1 = torch.normal(-2 * n_data, 1)  # class1 x data (tensor), shape=(100, 2)
    y1 = torch.ones(100)               # class1 y data (tensor), shape=(100,)
    # the two classes are normally distributed around (2, 2) and (-2, -2), labelled 0 and 1
    # convert to the data types torch expects
    x = torch.cat((x0, x1), 0).type(torch.FloatTensor)  # shape (200, 2), FloatTensor = 32-bit floating point
    y = torch.cat((y0, y1), ).type(torch.LongTensor)    # shape (200,), LongTensor = 64-bit integer
    # torch can only train on Variables, so convert them to Variables
    x, y = Variable(x), Variable(y)
    # plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=y.data.numpy(), s=100, lw=0, cmap='RdYlGn')
    # plt.show()

    # Define the neural network:
    class Net(torch.nn.Module):
        def __init__(self, n_feature, n_hidden, n_output):
            super(Net, self).__init__()
            self.hidden = torch.nn.Linear(n_feature, n_hidden)  # hidden layer
            self.out = torch.nn.Linear(n_hidden, n_output)      # output layer

        def forward(self, x):
            x = F.relu(self.hidden(x))  # activation function for the hidden layer
            x = self.out(x)
            return x

    net = Net(n_feature=2, n_hidden=10, n_output=2)  # define the network
    # the two outputs are the scores for each class, e.g. [1, 0] means class 0 and [0, 1] means class 1
    print(net)  # net architecture

    optimizer = torch.optim.SGD(net.parameters(), lr=0.02)
    loss_func = torch.nn.CrossEntropyLoss()  # the target label is NOT one-hot encoded

    plt.ion()  # interactive plotting
    # Train:
    for t in range(100):
        out = net(x)              # input x and predict based on x
        loss = loss_func(out, y)  # must be (1. nn output, 2. target); the target label is NOT one-hot encoded

        optimizer.zero_grad()     # clear gradients for the next step
        loss.backward()           # backpropagation, compute gradients
        optimizer.step()          # apply gradients

        if t % 2 == 0:
            # plot and show the learning process
            plt.cla()
            # the net outputs a score per class, so apply softmax to get probabilities and take the argmax
            prediction = torch.max(F.softmax(out), 1)[1]
            pred_y = prediction.data.numpy().squeeze()
            target_y = y.data.numpy()
            plt.scatter(x.data.numpy()[:, 0], x.data.numpy()[:, 1], c=pred_y, s=100, lw=0, cmap='RdYlGn')
            accuracy = sum(pred_y == target_y) / 200.
            plt.text(1.5, -4, 'Accuracy=%.2f' % accuracy, fontdict={'size': 20, 'color': 'red'})
            plt.pause(1.0)
    plt.ioff()
    plt.show()
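
The trained classifier can also be asked about a single new point. A minimal sketch (my own addition): a point near (2, 2) should come out as class 0, since class 0 was generated around that centre.

    # Minimal sketch: classify one new point with the trained net
    point = Variable(torch.FloatTensor([[2.0, 2.0]]))  # shape (1, 2)
    scores = net(point)
    label = torch.max(F.softmax(scores), 1)[1]
    print(label.data.numpy())                          # expected: [0]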



A quick way to define a network:

    import numpy as np
    import torch
    from torch.autograd import Variable
    # import torch.nn as nn
    import torch.nn.functional as F
    import matplotlib.pyplot as plt

    # replace the following class-based code with an easy sequential network
    # Method 1: define a class
    class Net(torch.nn.Module):
        def __init__(self, n_feature, n_hidden, n_output):
            super(Net, self).__init__()
            self.hidden = torch.nn.Linear(n_feature, n_hidden)   # hidden layer
            self.predict = torch.nn.Linear(n_hidden, n_output)   # output layer

        def forward(self, x):
            x = F.relu(self.hidden(x))   # activation function for the hidden layer
            x = self.predict(x)          # linear output
            return x

    net1 = Net(1, 10, 1)

    # Method 2: an easy and fast way to build the same network
    net2 = torch.nn.Sequential(
        torch.nn.Linear(1, 10),
        torch.nn.ReLU(),
        torch.nn.Linear(10, 1)
    )

    # the two methods are equivalent; only the printed architecture looks different
    print(net1)  # net1 architecture
    """
    Net (
      (hidden): Linear (1 -> 10)
      (predict): Linear (10 -> 1)
    )
    """
    print(net2)  # net2 architecture
    """
    Sequential (
      (0): Linear (1 -> 10)
      (1): ReLU ()
      (2): Linear (10 -> 1)
    )
    """

Saving and loading a network:

    import torch
    from torch.autograd import Variable
    import matplotlib.pyplot as plt

    torch.manual_seed(1)  # reproducible

    # fake data
    x = torch.unsqueeze(torch.linspace(-1, 1, 100), dim=1)  # x data (tensor), shape=(100, 1)
    y = x.pow(2) + 0.2 * torch.rand(x.size())                # noisy y data (tensor), shape=(100, 1)
    x, y = Variable(x, requires_grad=False), Variable(y, requires_grad=False)

    def save():
        # build and train net1
        net1 = torch.nn.Sequential(
            torch.nn.Linear(1, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 1)
        )
        optimizer = torch.optim.SGD(net1.parameters(), lr=0.5)
        loss_func = torch.nn.MSELoss()
        for t in range(100):
            prediction = net1(x)
            loss = loss_func(prediction, y)
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        # plot result
        plt.figure(1, figsize=(10, 3))
        plt.subplot(131)
        plt.title('Net1')
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

        # 2 ways to save the net
        torch.save(net1, 'net.pkl')                      # save the entire net
        torch.save(net1.state_dict(), 'net_params.pkl')  # save only the parameters

    def restore_net():
        # restore the entire net1 into net2
        net2 = torch.load('net.pkl')
        prediction = net2(x)

        # plot result
        plt.subplot(132)
        plt.title('Net2')
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)

    def restore_params():
        # restore only the parameters of net1 into net3
        # net3 must first be built with exactly the same structure as net1,
        # otherwise its parameters cannot be loaded
        net3 = torch.nn.Sequential(
            torch.nn.Linear(1, 10),
            torch.nn.ReLU(),
            torch.nn.Linear(10, 1)
        )
        # copy net1's parameters into net3
        net3.load_state_dict(torch.load('net_params.pkl'))
        prediction = net3(x)

        # plot result
        plt.subplot(133)
        plt.title('Net3')
        plt.scatter(x.data.numpy(), y.data.numpy())
        plt.plot(x.data.numpy(), prediction.data.numpy(), 'r-', lw=5)
        plt.show()

    # save net1
    save()
    # restore the entire net (may be slow)
    restore_net()
    # restore only the net parameters
    restore_params()
    # loading only the parameters is said to be somewhat faster than loading the whole net
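
What actually ends up in net_params.pkl is just an ordered mapping from parameter names to tensors. A minimal sketch (my own addition) to inspect it:

    # Minimal sketch: look at what the parameter file contains
    params = torch.load('net_params.pkl')
    for name, tensor in params.items():
        print(name, tuple(tensor.size()))
    # expected keys for the Sequential net above: 0.weight, 0.bias, 2.weight, 2.bias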


Training with mini-batches:

    import torch
    import torch.utils.data as Data

    torch.manual_seed(1)  # reproducible

    BATCH_SIZE = 5
    # BATCH_SIZE = 8: with only 10 data points in total, the first batch would
    # contain 8 points and the second only the remaining 2

    x = torch.linspace(1, 10, 10)   # x data (torch tensor)
    y = torch.linspace(10, 1, 10)   # y data (torch tensor)

    torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)

    # split the data into batches
    # if shuffle is not set (left at its default), batches are drawn in order
    # shuffle=True draws the batches in random order
    # num_workers is the number of subprocesses used to load each batch
    loader = Data.DataLoader(
        dataset=torch_dataset,    # torch TensorDataset format
        batch_size=BATCH_SIZE,    # mini-batch size
        shuffle=True,             # random shuffle for training
        num_workers=2,            # subprocesses for loading data
    )

    # epoch: one full pass over the dataset; within one epoch the data is split into
    # batches of size batch_size, and training on all of them completes the epoch
    for epoch in range(3):   # train on the entire dataset 3 times
        for step, (batch_x, batch_y) in enumerate(loader):   # for each training step
            # train your data...
            print('Epoch: ', epoch, '| Step: ', step, '| batch x: ',
                  batch_x.numpy(), '| batch y: ', batch_y.numpy())
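
To see the uneven last batch mentioned in the comment above, a minimal sketch (my own addition, reusing torch_dataset from the snippet):

    # Minimal sketch: with batch_size=8 and 10 samples, batches hold 8 and then 2 elements
    loader8 = Data.DataLoader(dataset=torch_dataset, batch_size=8, shuffle=True)
    for step, (batch_x, batch_y) in enumerate(loader8):
        print('Step:', step, '| batch size:', batch_x.size(0))
    # expected output: step 0 -> 8, step 1 -> 2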


4. Optimizer
    import torch
    import torch.utils.data as Data
    import torch.nn.functional as F
    from torch.autograd import Variable
    import matplotlib.pyplot as plt

    torch.manual_seed(1)  # reproducible

    # hyperparameters, conventionally named in ALL CAPS
    LR = 0.01
    BATCH_SIZE = 32
    EPOCH = 12

    # fake dataset
    x = torch.unsqueeze(torch.linspace(-1, 1, 1000), dim=1)
    y = x.pow(2) + 0.1 * torch.normal(torch.zeros(*x.size()))

    # plot dataset
    # plt.scatter(x.numpy(), y.numpy())
    # plt.show()

    # put the dataset into a torch dataset
    torch_dataset = Data.TensorDataset(data_tensor=x, target_tensor=y)
    loader = Data.DataLoader(dataset=torch_dataset, batch_size=BATCH_SIZE, shuffle=True)
    # there was originally one more argument here, but for some reason the program fails when it is added

    # default network
    class Net(torch.nn.Module):
        def __init__(self):
            super(Net, self).__init__()
            self.hidden = torch.nn.Linear(1, 20)    # hidden layer
            self.predict = torch.nn.Linear(20, 1)   # output layer

        def forward(self, x):
            x = F.relu(self.hidden(x))   # activation function for the hidden layer
            x = self.predict(x)          # linear output
            return x

    # different nets
    net_SGD = Net()
    net_Momentum = Net()
    net_RMSprop = Net()
    net_Adam = Net()
    # put the 4 nets into a list so they can all be trained in the same for loop
    nets = [net_SGD, net_Momentum, net_RMSprop, net_Adam]

    # different optimizers
    opt_SGD = torch.optim.SGD(net_SGD.parameters(), lr=LR)
    opt_Momentum = torch.optim.SGD(net_Momentum.parameters(), lr=LR, momentum=0.8)
    opt_RMSprop = torch.optim.RMSprop(net_RMSprop.parameters(), lr=LR, alpha=0.9)
    opt_Adam = torch.optim.Adam(net_Adam.parameters(), lr=LR, betas=(0.9, 0.99))
    optimizers = [opt_SGD, opt_Momentum, opt_RMSprop, opt_Adam]

    loss_func = torch.nn.MSELoss()
    losses_his = [[], [], [], []]   # record the loss of each net

    # training
    for epoch in range(EPOCH):
        print('Epoch: ', epoch)
        for step, (batch_x, batch_y) in enumerate(loader):   # for each training step
            # the batches are plain tensors; wrap them in Variables so the nets can process them
            b_x = Variable(batch_x)
            b_y = Variable(batch_y)
            for net, opt, l_his in zip(nets, optimizers, losses_his):
                output = net(b_x)               # get the output of each net
                loss = loss_func(output, b_y)   # compute the loss of each net
                opt.zero_grad()                 # clear gradients for the next step
                loss.backward()                 # backpropagation, compute gradients
                opt.step()                      # apply gradients
                l_his.append(loss.data[0])      # record the loss

    labels = ['SGD', 'Momentum', 'RMSprop', 'Adam']
    for i, l_his in enumerate(losses_his):
        plt.plot(l_his, label=labels[i])
    plt.legend(loc='best')
    plt.xlabel('Steps')
    plt.ylabel('Loss')
    plt.ylim((0, 0.2))
    plt.show()
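
A portability note (my addition, not in the original): these snippets read the scalar loss with loss.data[0], which works on the old PyTorch releases they were written for; on PyTorch 0.4 and later that indexing fails and loss.item() is used instead. A minimal version-tolerant sketch:

    # Minimal sketch: read a scalar loss on both old and new PyTorch releases
    def loss_to_float(loss):
        try:
            return loss.item()      # PyTorch >= 0.4
        except AttributeError:
            return loss.data[0]     # older releases using the Variable API

    # usage inside the training loop above:
    # l_his.append(loss_to_float(loss))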




