
Getting Started with PyTorch


Creating tensors

  import torch
  import numpy as np

  # Create a tensor with random normal values
  x = torch.randn((5, 3), dtype=torch.float16)
  # Shape of the tensor
  x.shape
  # Create an uninitialized tensor
  x = torch.empty((2, 3), dtype=torch.float32)
  # All-zeros tensor
  x = torch.zeros((2, 3), dtype=torch.long)
  # All-ones tensor
  x = torch.ones(2, 3)
  # Ones on the diagonal
  x = torch.eye(3, 4)
  # Create from a Python list, and convert back to a list
  x = torch.tensor([[2, 3, 4], [2, 3, 6]], dtype=torch.float16)
  x.tolist()
  # Create from a NumPy array, and convert back to an array
  a = np.random.random((2, 2))
  x = torch.from_numpy(a)
  x.numpy()
  '''
  Difference between torch.from_numpy and torch.tensor:
  from_numpy: the tensor shares memory with the array, so if the array changes, the tensor changes too
  torch.tensor: the data is copied, so changing the array does not change the tensor
  '''
  # Change the shape; reshape is more flexible than view
  x.reshape(1, -1)
  x.view(1, -1)
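
A quick check of the memory-sharing difference described in the comment above (a minimal sketch; the array values are arbitrary):

  a = np.zeros((2, 2))
  shared = torch.from_numpy(a)   # shares memory with a
  copied = torch.tensor(a)       # copies the data
  a[0, 0] = 99
  print(shared[0, 0])  # tensor(99., dtype=torch.float64) -- follows the array
  print(copied[0, 0])  # tensor(0., dtype=torch.float64)  -- unaffected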

Common operations

  x = torch.tensor([[2, 3, 4], [2, 3, 6]])
  y = torch.tensor([[1, 2, 1], [2, 6, 0]])
  x + y
  x - y
  x / y
  x * y
  # Element-wise maximum of two tensors (here the scalar 3 is broadcast)
  torch.maximum(torch.tensor(3), x)
  # Element-wise square
  torch.pow(x, 2)
  # Maximum along a given dimension
  torch.max(x, 1)
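
Note that torch.max with a dim argument returns both the maximum values and their indices, which can be unpacked directly:

  values, indices = torch.max(x, 1)
  print(values)   # tensor([4, 6])
  print(indices)  # tensor([2, 2])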

Gradient computation and gradient descent

  x = np.linspace(0, 100, 10000)
  noise = np.random.uniform(size=(10000,))
  # Ground truth: w=10, b=10
  y = 10 * x + 10 + noise
  x = torch.from_numpy(x)
  y = torch.from_numpy(y)
  w = torch.randn(1, requires_grad=True)
  b = torch.randn(1, requires_grad=True)
  # Fit by gradient descent
  for epoch in range(500000000):
      # Prediction
      y_ = x * w + b
      # Loss
      loss = torch.mean((y_ - y) ** 2)
      if epoch == 0:
          # Backpropagation
          loss.backward()
      else:
          # Zero the gradients before the next backward pass
          w.grad.zero_()
          b.grad.zero_()
          # Backpropagation
          loss.backward()
      # Gradient update; the step size needs care, or training diverges or becomes too slow
      w.data = w.data - 2e-4 * w.grad.data
      b.data = b.data - 2e-4 * b.grad.data
      if loss < 0.1:
          break
  # print(w, b)
  # w: 10.0038; b: 10.2498
  # print('epoch: {}, loss: {}'.format(epoch, loss.data))
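
The update step above writes to .data directly. A more common equivalent pattern (a small sketch, same step size as above) wraps the in-place update in torch.no_grad() so it is not recorded by autograd:

  with torch.no_grad():
      w -= 2e-4 * w.grad
      b -= 2e-4 * b.grad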

Implementing a fully connected layer with matrix multiplication

  x = torch.randn((4, 5))
  w_true = torch.randint(1, 10, size=(5, 1), dtype=torch.float32)
  b_true = torch.tensor(20.0)
  noise = torch.randn(size=(4, 1))
  # Matrix multiplication
  y = x @ w_true + b_true + noise
  w = torch.zeros(size=(5, 1), requires_grad=True, dtype=torch.float32)
  b = torch.zeros(1, requires_grad=True)
  # Training loop
  for epoch in range(10000000):
      y_ = x @ w + b
      loss = torch.mean((y - y_) ** 2)
      if epoch == 0:
          loss.backward()
      else:
          w.grad.zero_()
          b.grad.zero_()
          loss.backward()
      w.data = w.data - 2e-4 * w.grad.data
      b.data = b.data - 2e-4 * b.grad.data
      if loss < 0.1:
          break
  '''
  # Weights
  w: [[ 0.5081],
      [ 5.0037],
      [ 0.8767],
      [ 4.9839],
      [13.5279]]
  # Bias
  b: [14.1485]
  # Loss
  loss: 0.1000
  '''
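
The manual version above is exactly what nn.Linear computes; note that nn.Linear stores its weight with shape (out_features, in_features), so its forward pass is x @ weight.T + bias. A quick equivalence check (sketch, using the x defined above):

  from torch import nn

  net = nn.Linear(5, 1, bias=True)
  manual = x @ net.weight.T + net.bias
  print(torch.allclose(manual, net(x)))  # True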

Using an nn.Linear layer

  from torch import nn
  from torch import optim
  # Build the network
  net = nn.Linear(5, 1, bias=True)
  # Build the optimizer
  optimizer = optim.Adam(net.parameters(), lr=2e-4)
  for epoch in range(10000000):
      y_ = net(x)
      loss = torch.mean((y - y_) ** 2)
      # Zero the gradients
      optimizer.zero_grad()
      # Compute gradients
      loss.backward()
      # Update the parameters
      optimizer.step()
      if loss < 0.1:
          break
  # Weights
  # [ 0.6655, 4.8166, -3.5347, 7.4862, 13.4877]
  net.weight.data
  # Bias
  # [13.6001]
  net.bias.data
  # Loss
  # 0.0999

Activation functions

  # ELU
  def ELU_self(x, a=1.0):
      x = torch.tensor(x)
      x_0 = torch.tensor(0)
      return torch.maximum(x_0, x) + torch.minimum(x_0, a * (torch.exp(x) - 1))

  # LeakyReLU
  def LeakyReLU_self(x, a=1e-2):
      x = torch.tensor(x)
      x_0 = torch.tensor(0)
      return torch.maximum(x_0, x) + a * torch.minimum(x_0, x)

  # ReLU
  def ReLU_self(x):
      x = torch.tensor(x)
      x_0 = torch.tensor(0)
      return torch.maximum(x_0, x)

  # ReLU6
  def ReLU6_self(x):
      x = torch.tensor(x)
      x_0 = torch.tensor(0)
      x_6 = torch.tensor(6)
      return torch.minimum(torch.maximum(x_0, x), x_6)

  # SELU
  def SELU_self(x,
                scale=1.0507009873554804934193349852946,
                a=1.6732632423543772848170429916717):
      x = torch.tensor(x)
      x_0 = torch.tensor(0)
      return scale * (torch.maximum(x_0, x) +
                      torch.minimum(x_0, a * (torch.exp(x) - 1)))

  # CELU
  def CELU_self(x, a=1.0):
      x = torch.tensor(x)
      x_0 = torch.tensor(0)
      return torch.maximum(x_0, x) + torch.minimum(x_0,
                                                   a * (torch.exp(x / a) - 1.0))

  # Sigmoid
  def Sigmoid_self(x):
      x = torch.tensor(x)
      return 1.0 / (1 + torch.exp(-x))

  # LogSigmoid
  def LogSigmoid_self(x):
      x = torch.tensor(x)
      return torch.log(1.0 / (1 + torch.exp(-x)))

  # Tanh
  def Tanh_self(x):
      x = torch.tensor(x)
      return 1 - 2.0 / (torch.exp(2 * x) + 1)

  # Tanhshrink
  def Tanhshrink_self(x):
      x = torch.tensor(x)
      return x + 2.0 / (torch.exp(2 * x) + 1) - 1

  # Softplus
  def Softplus_self(x, b=1.0):
      x = torch.tensor(x)
      return 1 / b * torch.log(1 + torch.exp(x * b))

  # Softshrink; essentially shrinks values toward zero by lambd
  def Softshrink_self(x, lambd=0.5):
      x_ = torch.tensor(x)
      x_ = torch.where(x_ > lambd, x_ - lambd, x_)
      x_ = torch.where(x_ < -lambd, x_ + lambd, x_)
      x_[x == x_] = 0
      return x_
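
These hand-rolled versions can be sanity-checked against the built-in implementations in torch.nn.functional (a minimal sketch; only a few of them are checked here):

  import torch.nn.functional as F

  t = torch.linspace(-3, 3, 7)
  print(torch.allclose(ReLU_self(t), F.relu(t)))
  print(torch.allclose(ELU_self(t), F.elu(t)))
  print(torch.allclose(Sigmoid_self(t), torch.sigmoid(t)))
  print(torch.allclose(Softshrink_self(t), F.softshrink(t)))
  # all four should print True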

Convolution layers: principle and usage

  import matplotlib.pyplot as plt
  # PIL is used to read the image
  from PIL import Image
  import torch.nn as nn
  from torchvision import transforms
  from torchkeras import summary

  image = Image.open('tu.jpg')
  # Convert the image to a tensor
  img_transform = transforms.Compose([transforms.ToTensor()])
  img_tensor = img_transform(image)
  # Convolution expects a 4-D input (N, C, H, W)
  # The trailing '_' means the operation is in-place
  img_tensor.unsqueeze_(dim=0)
  flag = 0
  if flag:
      # input channels, number of filters, kernel size, stride, padding
      conv_layer = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=5, stride=1, padding=2)
      # Initialize the convolution weights
      nn.init.xavier_normal_(conv_layer.weight.data)
      # nn.init.xavier_uniform_(conv_layer.weight.data)
      # Forward pass
      img_conv = conv_layer(img_tensor)
  else:
      # Transposed convolution
      conv_layer_ts = nn.ConvTranspose2d(in_channels=3, out_channels=1, kernel_size=5, stride=1, padding=2)
      nn.init.xavier_normal_(conv_layer_ts.weight.data)
      img_conv_ts = conv_layer_ts(img_tensor)

Counting the parameters

parameters = number of filters × kernel size × input channels + number of filters (one bias per filter)

76 = 1*5*5*3 + 1

  # Parameter count for 32 filters of size 5x5 on a 1-channel input
  32*5*5*1 + 32  # = 832
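
The same count can be read off a layer by summing the number of elements in its parameters (a small sketch that rebuilds the 3-to-1 channel, 5x5 convolution used above):

  import torch.nn as nn

  conv_layer = nn.Conv2d(in_channels=3, out_channels=1, kernel_size=5, stride=1, padding=2)
  n_params = sum(p.numel() for p in conv_layer.parameters())
  print(n_params)  # 76 = 1*5*5*3 + 1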

Convolution output size

  # output size = (input size - kernel size + 2 * padding) / stride + 1
  # 500 = (500 - 5 + 2*2) / 1 + 1
  img_conv.shape
  # torch.Size([1, 1, 500, 500])

Plotting the results

  img_tensor.squeeze_(dim=0)
  img_conv.squeeze_(dim=0)
  img_conv_ts.squeeze_(dim=0)
  plt.subplot(131).imshow(np.transpose(img_tensor.data.numpy(), [1, 2, 0]))
  plt.axis('off')
  plt.subplot(132).imshow(np.transpose(img_conv.data.numpy(), [1, 2, 0]))
  plt.axis('off')
  plt.subplot(133).imshow(np.transpose(img_conv_ts.data.numpy(), [1, 2, 0]))
  plt.tight_layout()
  plt.axis('off')
  plt.show()

Loss functions

  # The standard usage pattern
  criterion = Losscriterion()   # placeholder for a concrete loss, e.g. nn.MSELoss()
  loss = criterion(y_, y)

Using the common losses

  # BCELoss, binary classification loss
  # The prediction comes first, the target second
  criterion = nn.BCELoss()
  m = nn.Sigmoid()
  x = torch.randn(3, requires_grad=True)
  y_ = m(x)
  y = torch.randint(0, 2, size=(3,), dtype=torch.float)
  loss = criterion(y_, y)
  loss.backward()
  loss

  # NLLLoss, multi-class loss; it expects log-probabilities, so pair it with LogSoftmax (not Softmax)
  criterion = nn.NLLLoss()
  m = nn.LogSoftmax(dim=1)
  x = torch.randn((3, 4), requires_grad=True)
  y_ = m(x)
  y = torch.randint(0, 4, size=(3,))
  loss = criterion(y_, y)
  loss.backward()
  loss

  # L1Loss, i.e. MAE
  criterion = nn.L1Loss()
  y_ = torch.randn((1, 5), requires_grad=True)
  y = torch.randn((1, 5))
  loss = criterion(y_, y)
  loss.backward()
  loss

  # MSELoss
  criterion = nn.MSELoss()
  y_ = torch.randn((1, 5), requires_grad=True)
  y = torch.randn((1, 5))
  loss = criterion(y_, y)
  loss.backward()
  loss
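
For multi-class problems, nn.CrossEntropyLoss is the more common choice: it combines LogSoftmax and NLLLoss in one step and takes the raw, unnormalized logits directly:

  criterion = nn.CrossEntropyLoss()
  logits = torch.randn((3, 4), requires_grad=True)  # raw scores, no softmax applied
  y = torch.randint(0, 4, size=(3,))
  loss = criterion(logits, y)
  loss.backward()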

Using optimizers

  from torch import optim
  # The general flow
  # Define the optimizer
  optimizer = Optim()   # placeholder, e.g. optim.SGD(net.parameters(), lr=0.01)
  # Zero the gradients
  optimizer.zero_grad()
  # Update the parameters
  optimizer.step()
  x = torch.randn((4, 5), requires_grad=False)
  w_true = torch.randint(1, 10, size=(5, 1), dtype=torch.float)
  b_true = torch.tensor(20.0)
  noise = torch.randn(size=(4, 1))
  y = x @ w_true + b_true + noise
  result = {}
  for lr in [0.01, 0.1, 0.5]:
      # Rebuild the network for each learning rate
      net = nn.Linear(5, 1, bias=True)
      # Define the optimizer
      optimizer = optim.SGD(net.parameters(), lr=lr)
      # Define the loss
      mseloss = nn.MSELoss()
      for epoch in range(10000000):
          # Zero the gradients
          optimizer.zero_grad()
          # Compute the loss
          loss = mseloss(net(x), y)
          # Backpropagation
          loss.backward()
          # Update
          optimizer.step()
          if loss.item() < 0.1 or epoch >= 10000:
              result[lr] = {'loss': loss.item(), 'epoch': epoch}
              break
  # Results
  # When lr is too large, training diverges and never converges
  # result =
  # {0.01: {'loss': 0.09930270910263062, 'epoch': 766},
  #  0.1: {'loss': 0.0925668329000473, 'epoch': 76},
  #  0.5: {'loss': nan, 'epoch': 10000}}

Pooling layers

  x = torch.randn(10, 3, 128, 128)
  # MaxPool2d
  maxp = nn.MaxPool2d(5, 3)
  # 42 = (128 - 5 + 2*0) / 3 + 1, rounded down
  maxp(x).shape
  # torch.Size([10, 3, 42, 42])
  maxp(x)[0, 0, 0, 4]
  # tensor(1.9936)
  # AvgPool2d, takes the mean of each window
  avgp = nn.AvgPool2d(5, 3)
  # 42 = (128 - 5 + 2*0) / 3 + 1, rounded down
  avgp(x).shape
  # torch.Size([10, 3, 42, 42])
  avgp(x)[0, 0, 0, 4]
  # tensor(-0.1445)
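
Each output position maps back to an input window of kernel_size starting at position * stride, so output column 4 with stride 3 and kernel 5 covers input columns 12 to 16. A quick check of that mapping (a sketch using the maxp and x defined above; the exact values depend on the random input):

  out = maxp(x)
  window = x[0, 0, 0:5, 12:17]   # the 5x5 window behind output position (0, 4)
  print(torch.equal(out[0, 0, 0, 4], window.max()))  # True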

Normalization layers

BatchNorm (BN) layers speed up training and help the model converge better. They only show a clear benefit when the batch size is large enough.
  x = torch.randint(0, 256, size=(10, 3, 128, 128)).float()
  # BatchNorm
  bn = nn.BatchNorm2d(3)
  bn(x)[0, 0, 0, 2]
  # tensor(1.1019, grad_fn=<SelectBackward>)
  # GroupNorm
  # num_channels must be divisible by num_groups
  gn = nn.GroupNorm(num_groups=3, num_channels=3)
  gn(x)[0, 0, 0, 2]
  # tensor(1.0831, grad_fn=<SelectBackward>)
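
In training mode, BatchNorm2d normalizes each channel over the batch and spatial dimensions using the (biased) batch statistics, then applies a learnable scale and shift that start at 1 and 0. A manual sketch, using the x and bn from above, that should match the layer's output up to floating-point tolerance:

  eps = 1e-5  # BatchNorm2d's default eps
  mean = x.mean(dim=(0, 2, 3), keepdim=True)
  var = x.var(dim=(0, 2, 3), unbiased=False, keepdim=True)
  x_manual = (x - mean) / torch.sqrt(var + eps)
  print(torch.allclose(x_manual, bn(x), atol=1e-5))  # True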