当前位置:   article > 正文

【Pytorch深度学习框架入门】_pytorch框架

pytorch框架

1.pytorch加载数据

两个有用的工具箱:

help(torch.cuda.is_available) # 对函数进行操作

唤醒指定的python运行环境的命令:

conda activate 环境的名称

Dataset 和 Dataloader 

Dataset:提供一种方式去获取数据及其label

  • 如何获取每一个数据及其label
  • 告诉我们总共有多少数据

Dataloader :为后面的网络提供不同的数据形式

  1. from torch.utils.data import Dataset #Dataset数据处理的包
  2. from PIL import Image
  3. import os
  4. #定义数据处理的类
  5. class MyData(Dataset):
  6. #数据地址处理方法
  7. def __init__(self,root_dir,label_dir):
  8. self.root_dir = root_dir #读取数据文件的根地址
  9. self.label_dir = label_dir #读取数据文件的字地址
  10. self.path = os.path.join(self.root_dir,self.label_dir)# 将根地址和子地址进行拼接
  11. self.img_path = os.listdir(self.path) #将图片的地址提取出来,并一个个存入到列表中去
  12. #提取每一个图片的信息
  13. def __getitem__(self, idx):
  14. img_name = self.img_path[idx] #根据序号从列表中找到相应的图片地址
  15. img_item_path = os.path.join(self.root_dir,self.label_dir,img_name)# 将根地址与图片地址进行拼接
  16. img = Image.open(img_item_path) #将地址转换为图片的形式
  17. label = self.label_dir# 读取标签的地址
  18. return img,label #返回图片和标签
  19. #计算数据集的长度
  20. def __len__(self):
  21. return len(self.img_path)
  22. root_dir = "dataset/train"
  23. ants_label_dir = "ants"
  24. bees_label_dir = "bees"
  25. ants_dataset = MyData(root_dir,ants_label_dir)
  26. bees_dataset = MyData(root_dir,bees_label_dir)
  27. train_dataset = ants_dataset + bees_dataset

2.TensorBoard的使用

  1. from torch.utils.tensorboard import SummaryWriter
  2. from PIL import Image
  3. import numpy as np
  4. writer = SummaryWriter("logs")
  5. image_path = "dataset/train/ants/0013035.jpg"
  6. img_PIL = Image.open(image_path)
  7. img_array = np.array(img_PIL)
  8. print(type(img_array))
  9. print(img_array.shape)
  10. # writer.add_image("test",img_array,1,dataformats='HWC')# 其中1的作用主要是为了进行步数的设置
  11. # y = x
  12. for i in range(100):
  13. writer.add_scalar("y=2x",3*i,i)
  14. writer.close()

启动日志的相关命令

tensorboard --logdir=logs --port=6007

3. transform的使用

下面图片是transform的图解:

  1. """
  2. transform的讲解
  3. """
  4. from PIL import Image
  5. from torch.utils.tensorboard import SummaryWriter
  6. from torchvision import transforms
  7. #python的用法 -》 tensor数据类型
  8. #通过 transform.Totensor去看两个问题
  9. # 1、transform该如何去使用(python)
  10. # 2、为什么我们需要tensor数据类型
  11. # 绝对路径:"F:\learn_pytorch\p9_transform.py"
  12. # 相对路径:"dataset/train/ants/0013035.jpg"
  13. #为什么不选择使用绝对路径,因为在window系统下,\会被认为是转移字符
  14. img_path = "dataset/train/ants/0013035.jpg"# 读取图片的相对地址
  15. img_path_abs = "F:\learn_pytorch\p9_transform.py"# 读取图片的绝对地址
  16. img = Image.open(img_path)# 打开图片
  17. #print(img)
  18. writer = SummaryWriter("logs") # 创建TensorBoard对象
  19. # 1、transform该如何去使用(python)
  20. tensor_trans = transforms.ToTensor()# 创建一个tensor_trans的图片类型转换工具的对象
  21. tensor_img = tensor_trans(img)# 将img转化成tensor的形式
  22. #print(tensor_img)
  23. writer.add_image("Tensor_img",tensor_img)# 利用TensorBoard展示数据

4.常见的transform

 Python中__call__的用法

  1. class Person:
  2. def __call__(self,name):
  3. print("__call__"+"Hello"+name)
  4. def hello(self,name):
  5. print("hello"+name)
  6. person = Person()
  7. person("张三")
  8. person.hello("lisi")

Totensor()的使用

  1. #Totensor()的使用
  2. trans_Totensor = transforms.ToTensor()
  3. img_tensor = trans_Totensor(img)
  4. writer.add_image('ToTensor',img_tensor)

Normalize()的使用

  1. print(img_tensor[0][0][0])
  2. trans_norm = transforms.Normalize([0.5,0.5,0.5],[0.5,0.5,0.5])
  3. img_norm = trans_norm(img_tensor)
  4. print(img_norm[0][0][0])

Resize()的使用

  1. #Resize()
  2. print(img.size)
  3. trans_size = transforms.Resize((512,512))
  4. # img PIL -> resize ->img_resize PIL
  5. img_resize = trans_size(img)
  6. # img_resize PIL -> totensor ->img_resize tensor
  7. img_resize = trans_Totensor(img_resize)
  8. writer.add_image('Resize',img_resize,0)
  9. print(img_resize)

Compose()的使用

  1. #Compose() -resize -2
  2. trans_resize_2 = transforms.Resize(512)
  3. # PIL -> PIL -> tensor
  4. trans_compose = transforms.Compose([trans_resize_2,trans_Totensor])
  5. img_resize_2 = trans_compose(img)
  6. writer.add_image('Compose',img_resize_2)
RandomCrop()的使用
  1. #RandomCrop()
  2. trans_random = transforms.RandomCrop((500,20))
  3. trans_compose_2 = transforms.Compose([trans_random,trans_Totensor])
  4. for i in range(10):
  5. img_crop = trans_compose_2(img)
  6. writer.add_image('RandomCrop',img_crop,i)

 4.torchvision中数据集的使用

进入pytorch的官网

依次进入到Docs->torchvision->dataset

 相关代码:

  1. import torchvision
  2. from torch.utils.tensorboard import SummaryWriter
  3. dataset_transform = torchvision.transforms.Compose([
  4. torchvision.transforms.ToTensor()
  5. ])
  6. train_set = torchvision.datasets.(root="./dataset1",train=True,download=True,transform=dataset_transform)#构建训练集
  7. test_set = torchvision.datasets.CIFAR10(root="./dataset1",train=False,download=True,transform=dataset_transform)#构建测试集
  8. '''
  9. print(test_set[0])
  10. print(test_set.classes)
  11. img,target = test_set[0]
  12. print(img)
  13. print(target)
  14. img.show()
  15. '''
  16. # print(test_set[0])
  17. writer = SummaryWriter('p10')
  18. # writer.add_image()
  19. for i in range(10):
  20. img,target = test_set[i]
  21. writer.add_image('test_set',img,i)
  22. writer.close()

5.dataloader的使用

 

  1. import torchvision
  2. from torch.utils.data import DataLoader
  3. from torch.utils.tensorboard import SummaryWriter
  4. test_data = torchvision.datasets.CIFAR10(root="./dataset1",train=True,transform=torchvision.transforms.ToTensor(),download=True)
  5. test_loader = DataLoader(dataset=test_data,batch_size=64,shuffle=False,num_workers=0,drop_last=False)
  6. #测试数据集里面的第一章图片及target
  7. img,target = test_data[0]
  8. print(img.shape)
  9. print(target)
  10. writer = SummaryWriter('dataloader')
  11. for epoch in range(2):#进行两轮
  12. step = 0
  13. for data in test_loader:
  14. imgs,targets = data
  15. writer.add_images(f"Epoch{epoch}",imgs,step)
  16. step = step + 1
  17. # print(imgs.shape)
  18. # print(target)
  19. print("读取结束")
  20. writer.close()

 6.神经网络的基本骨架-nn.Module的使用

 

  1. import torch
  2. from torch import nn
  3. class Tudui(nn.Module):
  4. def __init__(self) -> None:
  5. super().__init__()
  6. def forward(self,input):
  7. output = input + 1
  8. return output
  9. tutui = Tudui()
  10. x = torch.tensor(1.0)
  11. output = tutui(x)
  12. print(output)

 7.卷积操作


需要重点学会的是:Conv2d

对应位置相乘,然后加在一起

 

 注意padding填充的全部为0。

 需要注意以上的参数的要求:

  1. import torch
  2. import torch.nn.functional as F
  3. #输入
  4. input = torch.tensor([[1,2,0,3,1],
  5. [0,1,2,3,1],
  6. [1,2,1,0,0],
  7. [5,2,3,1,1],
  8. [2,1,0,1,1]])
  9. #卷积核
  10. kernel = torch.tensor([[1,2,1],
  11. [0,1,0],
  12. [2,1,0]])
  13. input = torch.reshape(input,(1,1,5,5))
  14. kernel = torch.reshape(kernel,(1,1,3,3))
  15. print(input.shape)
  16. print(kernel.shape)
  17. output1= F.conv2d(input,kernel,stride=1)
  18. print(output1)
  19. output2 = F.conv2d(input,kernel,stride=2)
  20. print(output2)
  21. output3 = F.conv2d(input,kernel,stride=1,padding=1)
  22. print(output3)

 8.神经网络-卷积层

 

 当out_channel = 2的时候,此时会设置2个卷积核

  1. import torch
  2. import torchvision
  3. #加载测试集
  4. from torch import nn
  5. from torch.nn import Conv2d
  6. from torch.utils.data import DataLoader
  7. from torch.utils.tensorboard import SummaryWriter
  8. dataset = torchvision.datasets.CIFAR10(root="./dataset",train=False,transform=torchvision.transforms.ToTensor(),download=True)
  9. dataloader = DataLoader(dataset,batch_size=64,num_workers=0)
  10. class Tudui(nn.Module):
  11. def __init__(self):
  12. super(Tudui, self).__init__()
  13. self.conv1 = Conv2d(in_channels=3,out_channels=6,kernel_size=3,stride=1,padding=0)
  14. def forward(self,x):
  15. x = self.conv1(x)
  16. return x
  17. tudui = Tudui()
  18. print(tudui)
  19. writer = SummaryWriter('./logs')
  20. step = 0
  21. for data in dataloader:
  22. imgs,targets = data
  23. ouput = tudui(imgs)
  24. print(imgs.shape)
  25. print(ouput.shape)
  26. writer.add_images("input",imgs,step)
  27. ouput = torch.reshape(ouput, (-1, 3, 30, 30)) # ->[xxx,3,30,30],3是通道数减少,使得xxx的batchsize变大
  28. writer.add_images("ouput",ouput,step)
  29. step = step + 1
  30. print("over")

  9.神经网络-最大池化的使用

 

 

 

 

  1. import torch
  2. from torch import nn
  3. from torch.nn import MaxPool2d
  4. input = torch.tensor([[1,2,0,3,1],
  5. [0,1,2,3,1],
  6. [1,2,1,0,0],
  7. [5,2,3,1,1],
  8. [2,1,0,1,1]],dtype=torch.float)
  9. input = torch.reshape(input,(-1,1,5,5))
  10. class Tudui(nn.Module) :
  11. def __init__(self):
  12. super(Tudui, self).__init__()
  13. self.maxpool = MaxPool2d(kernel_size=3,ceil_mode=False)
  14. def forward(self,input):
  15. output = self.maxpool(input)
  16. return output
  17. tudui = Tudui()
  18. output = tudui(input)
  19. print(output)

 最大池化的作用:就是压缩。

  1. import torch
  2. import torchvision
  3. from torch import nn
  4. from torch.nn import MaxPool2d
  5. from torch.utils.data import DataLoader
  6. from torch.utils.tensorboard import SummaryWriter
  7. dataset = torchvision.datasets.CIFAR10(root="./dataset",train=False,transform=torchvision.transforms.ToTensor(),download=True)
  8. dataloader = DataLoader(dataset,batch_size=64,shuffle=True)
  9. '''
  10. input = torch.tensor([[1,2,0,3,1],
  11. [0,1,2,3,1],
  12. [1,2,1,0,0],
  13. [5,2,3,1,1],
  14. [2,1,0,1,1]],dtype=torch.float)
  15. input = torch.reshape(input,(-1,1,5,5))
  16. '''
  17. class Tudui(nn.Module) :
  18. def __init__(self):
  19. super(Tudui, self).__init__()
  20. self.maxpool = MaxPool2d(kernel_size=3,ceil_mode=False)
  21. def forward(self,input):
  22. output = self.maxpool(input)
  23. return output
  24. tudui = Tudui()
  25. writer = SummaryWriter("logs")
  26. step = 0
  27. for data in dataloader:
  28. imgs,target = data
  29. writer.add_images("imgs",imgs,step)
  30. print(imgs.shape)
  31. output = tudui(imgs)
  32. writer.add_images("maxpool",output,step)
  33. print(output.shape)
  34. step = step + 1
  35. writer.close()
  36. print("over")
  37. # tudui = Tudui()
  38. # output = tudui(input)
  39. # print(output)

10.神经网络-非线性激活

inplace参数的讲解

  1. '''
  2. ReLU
  3. '''
  4. import torch
  5. from torch import nn
  6. from torch.nn import ReLU
  7. input = torch.tensor([[1,-0.5],
  8. [-1,3]])
  9. input = torch.reshape(input,(-1,1,2,2))
  10. print(input.shape)
  11. class Tudui(nn.Module):
  12. def __init__(self):
  13. super(Tudui, self).__init__()
  14. self.relu1 = ReLU()
  15. def forward(self,input):
  16. output = self.relu1(input)
  17. return output
  18. tudui = Tudui()
  19. output = tudui(input)
  20. print(output)

  1. '''
  2. Sigmoid
  3. '''
  4. import torch
  5. import torchvision.datasets
  6. from torch import nn
  7. from torch.nn import ReLU, Sigmoid
  8. from torch.utils.data import DataLoader
  9. from torch.utils.tensorboard import SummaryWriter
  10. input = torch.tensor([[1,-0.5],
  11. [-1,3]])
  12. input = torch.reshape(input,(-1,1,2,2))
  13. print(input.shape)
  14. dataset = torchvision.datasets.CIFAR10(root="./dataset",train=False,download=True,
  15. transform=torchvision.transforms.ToTensor())
  16. dataloader = DataLoader(dataset,batch_size=64)
  17. class Tudui(nn.Module):
  18. def __init__(self):
  19. super(Tudui, self).__init__()
  20. self.relu1 = ReLU()
  21. self.sigmoid1 = Sigmoid()
  22. def forward(self,input):
  23. output = self.sigmoid1(input)
  24. return output
  25. tudui = Tudui()
  26. writer = SummaryWriter("logs")
  27. step = 0
  28. for data in dataloader:
  29. imgs,targets = data
  30. print(imgs.shape)
  31. writer.add_images("imgs",imgs,step)
  32. output = tudui(imgs)
  33. print(output.shape)
  34. writer.add_images("Sigmod",output,step)
  35. writer.close()

11.神经网络-线性层及其它层的介绍 

 线性层

  1. """
  2. vgg16
  3. """
  4. import torch
  5. import torchvision.datasets
  6. from torch import nn
  7. from torch.nn import Linear
  8. from torch.utils.data import DataLoader
  9. from torch.utils.tensorboard import SummaryWriter
  10. dataset = torchvision.datasets.CIFAR10("./dataset",train=False,transform=torchvision.transforms.ToTensor(),
  11. download=True)
  12. dataloader = DataLoader(dataset,batch_size=64)
  13. class Tudui(nn.Module):
  14. def __init__(self):
  15. super(Tudui, self).__init__()
  16. self.linear1 = Linear(196608,10)
  17. def forward(self,input):
  18. output = self.linear1(input)
  19. return output
  20. tudui = Tudui()
  21. #writer = SummaryWriter("logs")
  22. #step = 0
  23. for data in dataloader:
  24. imgs,tragets = data
  25. print(imgs.shape)
  26. #writer.add_images("imgs",imgs,step)
  27. #output = torch.reshape(imgs,(1,1,1,-1))
  28. output = torch.flatten(imgs)
  29. print(output.shape)
  30. output = tudui(output)
  31. print(output.shape)
  32. #writer.add_images("linear",output,step)
  33. #step += 1
  34. #writer.close()

12.神经网络-搭建小实战和Sequential的使用

 CIFAR 10 model结构

 

  1. import torch
  2. from torch import nn
  3. from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
  4. from torch.utils.tensorboard import SummaryWriter
  5. class Tudui(nn.Module):
  6. def __init__(self):
  7. super(Tudui, self).__init__()
  8. self.conv1 = Conv2d(3,32,5,padding=2)
  9. self.maxpool1 = MaxPool2d(2)
  10. self.conv2 = Conv2d(32,32,5,padding=2)
  11. self.maxpool2 = MaxPool2d(2)
  12. self.conv3 = Conv2d(32,64,5,padding=2)
  13. self.maxpool3 = MaxPool2d(2)
  14. self.flatten = Flatten()
  15. self.Linear1 = Linear(1024,64)
  16. self.Linear2 = Linear(64,10)
  17. self.model1 = Sequential(
  18. Conv2d(3,32,5,padding=2),
  19. MaxPool2d(2),
  20. Conv2d(32, 32, 5, padding=2),
  21. MaxPool2d(2),
  22. Conv2d(32, 64, 5, padding=2),
  23. MaxPool2d(2),
  24. Flatten(),
  25. Linear(1024, 64),
  26. Linear(64, 10)
  27. )
  28. def forward(self,x):
  29. # x = self.conv1(x)
  30. # x = self.maxpool1(x)
  31. # x = self.conv2(x)
  32. # x = self.maxpool2(x)
  33. # x = self.conv3(x)
  34. # x = self.maxpool3(x)
  35. # x = self.flatten(x)
  36. # x = self.Linear1(x)
  37. # x = self.Linear2(x)
  38. x = self.model1(x)
  39. return x
  40. tudui = Tudui()
  41. input = torch.ones((64,3,32,32))
  42. output = tudui(input)
  43. print(output)
  44. writer = SummaryWriter("logs_seq")
  45. writer.add_graph(tudui,input)
  46. writer.close()

13.损失函数

 

  1. '''
  2. nn.loss
  3. '''
  4. import torch
  5. from torch.nn import L1Loss
  6. inputs = torch.tensor([1,2,3],dtype=torch.float32)
  7. targets = torch.tensor([1,2,5],dtype=torch.float32)
  8. inputs = torch.reshape(inputs,(1,1,1,3))
  9. targets = torch.reshape(targets,(1,1,1,3))
  10. loss =L1Loss(reduction='sum')
  11. result = loss(inputs,targets)
  12. print(result)

  1. '''
  2. nn.MSEloss
  3. '''
  4. import torch
  5. from torch.nn import L1Loss
  6. from torch import nn
  7. inputs = torch.tensor([1,2,3],dtype=torch.float32)
  8. targets = torch.tensor([1,2,5],dtype=torch.float32)
  9. inputs = torch.reshape(inputs,(1,1,1,3))
  10. targets = torch.reshape(targets,(1,1,1,3))
  11. loss =L1Loss(reduction='sum')
  12. result = loss(inputs,targets)
  13. loss_mse = nn.MSELoss()
  14. result_mse = loss_mse(inputs,targets)
  15. print(result)
  16. print(result_mse)

  1. '''
  2. nn.CrossEntropyLoss
  3. '''
  4. import torch
  5. from torch.nn import L1Loss
  6. from torch import nn
  7. inputs = torch.tensor([1,2,3],dtype=torch.float32)
  8. targets = torch.tensor([1,2,5],dtype=torch.float32)
  9. inputs = torch.reshape(inputs,(1,1,1,3))
  10. targets = torch.reshape(targets,(1,1,1,3))
  11. loss =L1Loss(reduction='sum')
  12. result = loss(inputs,targets)
  13. loss_mse = nn.MSELoss()
  14. result_mse = loss_mse(inputs,targets)
  15. print(result)
  16. print(result_mse)
  17. x = torch.tensor([0.1,0.2,0.3])
  18. y = torch.tensor([1])
  19. x = torch.reshape(x,(1,3))
  20. loss_cross = nn.CrossEntropyLoss()
  21. result_cross = loss_cross(x,y)
  22. print(result_cross)

  1. import torchvision
  2. from torch import nn
  3. from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
  4. from torch.utils.data import DataLoader
  5. dataset = torchvision.datasets.CIFAR10("./data",train=False,transform=torchvision.transforms.ToTensor(),
  6. download=True)
  7. dataloader = DataLoader(dataset,batch_size = 1)
  8. class Tudui(nn.Module):
  9. def __init__(self):
  10. super(Tudui, self).__init__()
  11. self.conv1 = Conv2d(3,32,5,padding=2)
  12. self.maxpool1 = MaxPool2d(2)
  13. self.conv2 = Conv2d(32,32,5,padding=2)
  14. self.maxpool2 = MaxPool2d(2)
  15. self.conv3 = Conv2d(32,64,5,padding=2)
  16. self.maxpool3 = MaxPool2d(2)
  17. self.flatten = Flatten()
  18. self.Linear1 = Linear(1024,64)
  19. self.Linear2 = Linear(64,10)
  20. self.model1 = Sequential(
  21. Conv2d(3,32,5,padding=2),
  22. MaxPool2d(2),
  23. Conv2d(32, 32, 5, padding=2),
  24. MaxPool2d(2),
  25. Conv2d(32, 64, 5, padding=2),
  26. MaxPool2d(2),
  27. Flatten(),
  28. Linear(1024, 64),
  29. Linear(64, 10)
  30. )
  31. def forward(self,x):
  32. # x = self.conv1(x)
  33. # x = self.maxpool1(x)
  34. # x = self.conv2(x)
  35. # x = self.maxpool2(x)
  36. # x = self.conv3(x)
  37. # x = self.maxpool3(x)
  38. # x = self.flatten(x)
  39. # x = self.Linear1(x)
  40. # x = self.Linear2(x)
  41. x = self.model1(x)
  42. return x
  43. loss = nn.CrossEntropyLoss()
  44. tudui = Tudui()
  45. for data in dataloader:
  46. imgs,targets = data
  47. outputs = tudui(imgs)
  48. result_loss = loss(outputs,targets)
  49. result_loss.backward()
  50. print("ok")
  51. print(result_loss)
  52. print(outputs)
  53. print(targets)

14.优化器

  1. import torch
  2. import torchvision
  3. from torch import nn
  4. from torch.nn import Conv2d, MaxPool2d, Flatten, Linear, Sequential
  5. from torch.utils.data import DataLoader
  6. dataset = torchvision.datasets.CIFAR10("./data",train=False,transform=torchvision.transforms.ToTensor(),
  7. download=True)
  8. dataloader = DataLoader(dataset,batch_size = 1)
  9. class Tudui(nn.Module):
  10. def __init__(self):
  11. super(Tudui, self).__init__()
  12. self.conv1 = Conv2d(3,32,5,padding=2)
  13. self.maxpool1 = MaxPool2d(2)
  14. self.conv2 = Conv2d(32,32,5,padding=2)
  15. self.maxpool2 = MaxPool2d(2)
  16. self.conv3 = Conv2d(32,64,5,padding=2)
  17. self.maxpool3 = MaxPool2d(2)
  18. self.flatten = Flatten()
  19. self.Linear1 = Linear(1024,64)
  20. self.Linear2 = Linear(64,10)
  21. self.model1 = Sequential(
  22. Conv2d(3,32,5,padding=2),
  23. MaxPool2d(2),
  24. Conv2d(32, 32, 5, padding=2),
  25. MaxPool2d(2),
  26. Conv2d(32, 64, 5, padding=2),
  27. MaxPool2d(2),
  28. Flatten(),
  29. Linear(1024, 64),
  30. Linear(64, 10)
  31. )
  32. def forward(self,x):
  33. # x = self.conv1(x)
  34. # x = self.maxpool1(x)
  35. # x = self.conv2(x)
  36. # x = self.maxpool2(x)
  37. # x = self.conv3(x)
  38. # x = self.maxpool3(x)
  39. # x = self.flatten(x)
  40. # x = self.Linear1(x)
  41. # x = self.Linear2(x)
  42. x = self.model1(x)
  43. return x
  44. loss = nn.CrossEntropyLoss()
  45. tudui = Tudui()
  46. optim = torch.optim.SGD(tudui.parameters(),lr = 0.01,)
  47. for epoch in range(20):
  48. running_loss = 0.0
  49. for data in dataloader:
  50. imgs,targets = data
  51. outputs = tudui(imgs)
  52. result_loss = loss(outputs,targets)# 计算损失
  53. optim.zero_grad()# 梯度清零
  54. result_loss.backward()# 反向传播,求出每个参数的梯度
  55. optim.step() #对权重进行更新
  56. running_loss = running_loss + result_loss
  57. print(running_loss)

15.现有模型的使用和修改

  1. import torchvision
  2. # train_data = torchvision.datasets.ImageNet("./dataset",split='train',download=True,
  3. # transform=torchvision.transforms.ToTensor())
  4. from torch import nn
  5. vgg16_false = torchvision.models.vgg16(pretrained=False)
  6. vgg16_true = torchvision.models.vgg16(pretrained=True)
  7. print("ok")
  8. print(vgg16_true)
  9. train_data = torchvision.datasets.CIFAR10('./data',train=True,transform=torchvision.transforms.ToTensor(),
  10. download=True)
  11. # 修改vgg16网络模型的结构
  12. vgg16_true.classifier.add_module('add_liner',nn.Linear(1000,10))
  13. print(vgg16_true)
  14. print(vgg16_false)
  15. vgg16_false.classifier[6] = nn.Linear(4096,10)
  16. print(vgg16_false)

16.网络模型的保存与读取

 

自己定义模型

  1. from torch import nn
  2. class Tudui(nn.Module):
  3. def __init__(self):
  4. super(Tudui, self).__init__()
  5. self.conv1 = nn.Conv2d(3,64,kernel_size=3)
  6. def forward(self,x):
  7. x = self.conv1(x)
  8. return x

保存模型

  1. import torch
  2. import torchvision
  3. vgg16 = torchvision.models.vgg16(pretrained=False) #加载vgg16初始的模型
  4. #保存方式1
  5. torch.save(vgg16,"vgg16_method1.pth")
  1. import torch
  2. import torchvision
  3. from torch import nn
  4. from Tudui import Tudui
  5. # vgg16 = torchvision.models.vgg16(pretrained=False) #加载vgg16初始的模型
  6. #
  7. # #保存方式1 模型的结构+模型的参数
  8. # torch.save(vgg16,"vgg16_method1.pth")
  9. #
  10. # #保存方式2 模型的参数(官方的推荐)保存为字典的形式
  11. # torch.save(vgg16.state_dict(),"vgg16_method2.pth")
  12. #陷阱
  13. # class Tudui(nn.Module):
  14. # def __init__(self):
  15. # super(Tudui, self).__init__()
  16. # self.conv1 = nn.Conv2d(3,64,kernel_size=3)
  17. #
  18. # def forward(self,x):
  19. # x = self.conv1(x)
  20. # return x
  21. tudui = Tudui()
  22. torch.save(tudui,"tudui_method1.pth")
  23. print("over")

加载模型

  1. import torch
  2. import torchvision
  3. vgg16 = torchvision.models.vgg16(pretrained=False) #加载vgg16初始的模型
  4. #保存方式1
  5. torch.save(vgg16,"vgg16_method1.pth")
  1. """
  2. 加载模型
  3. """
  4. import torch
  5. #保存方式1的加载模型的方法
  6. import torchvision
  7. # model = torch.load("vgg16_method1.pth")
  8. #print(model)
  9. #方式2的加载模型的方法
  10. # # model = torch.load("vgg16_method2.pth")
  11. # print(model)
  12. vgg16 = torchvision.models.vgg16(pretrained=False)
  13. vgg16.load_state_dict((torch.load("vgg16_method2.pth")))
  14. # print(vgg16)
  15. #陷阱
  16. model = torch.load("tudui_method1.pth")
  17. print(model)

17.完整的模型训练的套路

 定义网络模型Model.py

  1. #搭建神经网络
  2. from torch import nn
  3. class Tudui(nn.Module):
  4. def __init__(self):
  5. super(Tudui, self).__init__()
  6. #使用序列化的方法更新神经网络的各个层
  7. self.model = nn.Sequential(
  8. nn.Conv2d(3,32,kernel_size=5,stride=1,padding=2),
  9. nn.MaxPool2d(kernel_size=2),
  10. nn.Conv2d(32,32,kernel_size=5,stride=1,padding=2),
  11. nn.MaxPool2d(2),
  12. nn.Conv2d(32,64,kernel_size=5,stride=1,padding=2),
  13. nn.MaxPool2d(2),
  14. nn.Flatten(),
  15. nn. Linear(64*4*4,64),
  16. nn.Linear(64,10)
  17. )
  18. #定义前向传播
  19. def forward(self,x):
  20. x = self.model(x)
  21. return x

完整的模型训练套路train.py

  1. """
  2. 完整的模型训练的套路
  3. """
  4. #准备数据集
  5. import torch
  6. import torchvision
  7. from torch import nn
  8. from torch.utils.data import DataLoader
  9. from torch.utils.tensorboard import SummaryWriter
  10. from Model import Tudui
  11. train_data = torchvision.datasets.CIFAR10("./data",
  12. train=True,
  13. transform=torchvision.transforms.ToTensor(),
  14. download=True)
  15. test_data = torchvision.datasets.CIFAR10("./data",
  16. train=False,
  17. transform=torchvision.transforms.ToTensor(),
  18. download=True)
  19. #查看训练集和测试集有多少张
  20. #length 长度
  21. train_data_size = len(train_data) #训练集的长度
  22. test_data_size = len(test_data) #测试集的长度
  23. print(f"训练集的长度为{train_data_size}\n")
  24. print(f"测试集的长度为{test_data_size}\n")
  25. # 利用DataLoader 来加载数据集
  26. train_dataloader = DataLoader(train_data,batch_size=64)
  27. test_dataloader = DataLoader(test_data,batch_size=64)
  28. #主函数
  29. if __name__ == '__main__':
  30. # 创建网络模型
  31. tudui = Tudui()
  32. #小测试
  33. input = torch.ones((64,3,32,32))
  34. output = tudui(input)
  35. print(output.shape)
  36. """
  37. torch.Size([64, 10])
  38. 64是代表64张照片
  39. 10是代表10个类别,每张图片10各类别上分别的概率
  40. """
  41. # 损失函数
  42. loss_fn = nn.CrossEntropyLoss()
  43. #学习速率
  44. #1e-2=1x10^(-2)
  45. learning_rate = 1e-2
  46. # 优化器
  47. optimizer = torch.optim.SGD(tudui.parameters(),lr = learning_rate,)
  48. #设置训练网络的一些参数
  49. total_train_step = 0 #记录训练的次数
  50. total_test_step = 0 #记录测试的次数
  51. #训练的次数
  52. epoch = 10
  53. #添加tensorboard
  54. writer = SummaryWriter("logs_train")
  55. for i in range(epoch):
  56. print(f"------第{i+1}轮训练开始------")
  57. # 训练步骤开始
  58. tudui.train()
  59. for data in train_dataloader:
  60. imgs,targets = data
  61. outputs = tudui(imgs)
  62. loss = loss_fn(outputs,targets)
  63. #优化器的调优
  64. optimizer.zero_grad()# 梯度清零
  65. loss.backward()# 反向传播
  66. optimizer.step()# 更新优化参数
  67. total_train_step = total_train_step + 1 #训练次数加1
  68. if total_train_step % 100 == 0:# 每个一百次输出一次训练的结果
  69. print(f"训练次数:{total_train_step},Loss:{loss.item()}") #记录每次训练的损失结果,item()主要就是把loss转化为真实的数,其实转化不转化都行的
  70. writer.add_scalar("train_loss",loss.item(),total_train_step)
  71. #测试步骤开始
  72. tudui.eval()
  73. total_test_loss = 0
  74. total_accuracy = 0
  75. with torch.no_grad():#防止调优,测试时不需要进行调优
  76. for data in test_dataloader:
  77. imgs , targets = data
  78. outputs = tudui(imgs)
  79. loss = loss_fn(outputs,targets)
  80. total_test_loss = total_test_loss + loss
  81. #计算整体的正确率
  82. accuracy = (outputs.argmax(1) == targets).sum()
  83. total_accuracy = total_accuracy + accuracy
  84. print(f"整体测试集上的正确率{total_accuracy/test_data_size}")
  85. print(f"整体测试集上的Loss:{total_test_loss}")
  86. writer.add_scalar("test_loss",total_test_loss,total_test_step)
  87. writer.add_scalar("test_accuracy",total_accuracy/test_data_size,total_test_step)
  88. total_test_step = total_test_step + 1
  89. #保存模型
  90. torch.save(tudui,f"tudui_{i}.ph")
  91. #torch.save(tudui.state_dict(),f"tudui_{i}.ph")
  92. print("模型已保存")
  93. writer.close()

18.利用GPU进行训练

  1. """
  2. 完整的模型训练的套路
  3. """
  4. #准备数据集
  5. import torch
  6. import torchvision
  7. from torch import nn
  8. from torch.utils.data import DataLoader
  9. from torch.utils.tensorboard import SummaryWriter
  10. import time
  11. from Model import Tudui
  12. train_data = torchvision.datasets.CIFAR10("./data",
  13. train=True,
  14. transform=torchvision.transforms.ToTensor(),
  15. download=True)
  16. test_data = torchvision.datasets.CIFAR10("./data",
  17. train=False,
  18. transform=torchvision.transforms.ToTensor(),
  19. download=True)
  20. #查看训练集和测试集有多少张
  21. #length 长度
  22. train_data_size = len(train_data) #训练集的长度
  23. test_data_size = len(test_data) #测试集的长度
  24. print(f"训练集的长度为{train_data_size}\n")
  25. print(f"测试集的长度为{test_data_size}\n")
  26. # 利用DataLoader 来加载数据集
  27. train_dataloader = DataLoader(train_data,batch_size=64)
  28. test_dataloader = DataLoader(test_data,batch_size=64)
  29. #主函数
  30. if __name__ == '__main__':
  31. # 创建网络模型
  32. tudui = Tudui()
  33. if torch.cuda.is_available():
  34. tudui = tudui.cuda()
  35. #小测试
  36. input = torch.ones((64,3,32,32))
  37. device = torch.device('cuda:0') #将tensor.cpu类型的数据转化为tensor.gpu类型的数据
  38. input = input.to(device)
  39. output = tudui(input)
  40. print(output.shape)
  41. """
  42. torch.Size([64, 10])
  43. 64是代表64张照片
  44. 10是代表10个类别,每张图片10各类别上分别的概率
  45. """
  46. # 损失函数
  47. loss_fn = nn.CrossEntropyLoss()
  48. if torch.cuda.is_available():
  49. loss_fn = loss_fn.cuda()
  50. #学习速率
  51. #1e-2=1x10^(-2)
  52. learning_rate = 1e-2
  53. # 优化器
  54. optimizer = torch.optim.SGD(tudui.parameters(),lr = learning_rate,)
  55. #设置训练网络的一些参数
  56. total_train_step = 0 #记录训练的次数
  57. total_test_step = 0 #记录测试的次数
  58. #训练的次数
  59. epoch = 10
  60. #添加tensorboard
  61. writer = SummaryWriter("logs_train")
  62. start_time = time.time() #开始训练的时间
  63. for i in range(epoch):
  64. print(f"------第{i+1}轮训练开始------")
  65. # 训练步骤开始
  66. tudui.train()
  67. for data in train_dataloader:
  68. imgs,targets = data
  69. if torch.cuda.is_available():
  70. imgs = imgs.cuda()
  71. targets = targets.cuda()
  72. outputs = tudui(imgs)
  73. loss = loss_fn(outputs,targets)
  74. #优化器的调优
  75. optimizer.zero_grad()# 梯度清零
  76. loss.backward()# 反向传播
  77. optimizer.step()# 更新优化参数
  78. total_train_step = total_train_step + 1 #训练次数加1
  79. if total_train_step % 100 == 0:# 每个一百次输出一次训练的结果
  80. end_time = time.time() #结束时间
  81. print(end_time - start_time) #计算100次训练的间隔的时间
  82. print(f"训练次数:{total_train_step},Loss:{loss.item()}") #记录每次训练的损失结果,item()主要就是把loss转化为真实的数,其实转化不转化都行的
  83. writer.add_scalar("train_loss",loss.item(),total_train_step)
  84. #测试步骤开始
  85. tudui.eval()
  86. total_test_loss = 0
  87. total_accuracy = 0
  88. with torch.no_grad():#防止调优,测试时不需要进行调优
  89. for data in test_dataloader:
  90. imgs , targets = data
  91. if torch.cuda.is_available():
  92. imgs = imgs.cuda()
  93. targets = targets.cuda()
  94. outputs = tudui(imgs)
  95. loss = loss_fn(outputs,targets)
  96. total_test_loss = total_test_loss + loss
  97. #计算整体的正确率
  98. accuracy = (outputs.argmax(1) == targets).sum()
  99. total_accuracy = total_accuracy + accuracy
  100. print(f"整体测试集上的正确率{total_accuracy/test_data_size}")
  101. print(f"整体测试集上的Loss:{total_test_loss}")
  102. writer.add_scalar("test_loss",total_test_loss,total_test_step)
  103. writer.add_scalar("test_accuracy",total_accuracy/test_data_size,total_test_step)
  104. total_test_step = total_test_step + 1
  105. #保存模型
  106. torch.save(tudui,f"tudui_{i}.ph")
  107. #torch.save(tudui.state_dict(),f"tudui_{i}.ph")
  108. print("模型已保存")
  109. writer.close()

  1. """
  2. 完整的模型训练的套路
  3. """
  4. #准备数据集
  5. import torch
  6. import torchvision
  7. from torch import nn
  8. from torch.utils.data import DataLoader
  9. from torch.utils.tensorboard import SummaryWriter
  10. import time
  11. from Model import Tudui
  12. #定义训练的设备
  13. # device = torch.device("cuda:0")
  14. device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
  15. train_data = torchvision.datasets.CIFAR10("./data",
  16. train=True,
  17. transform=torchvision.transforms.ToTensor(),
  18. download=True)
  19. test_data = torchvision.datasets.CIFAR10("./data",
  20. train=False,
  21. transform=torchvision.transforms.ToTensor(),
  22. download=True)
  23. #查看训练集和测试集有多少张
  24. #length 长度
  25. train_data_size = len(train_data) #训练集的长度
  26. test_data_size = len(test_data) #测试集的长度
  27. print(f"训练集的长度为{train_data_size}\n")
  28. print(f"测试集的长度为{test_data_size}\n")
  29. # 利用DataLoader 来加载数据集
  30. train_dataloader = DataLoader(train_data,batch_size=64)
  31. test_dataloader = DataLoader(test_data,batch_size=64)
  32. #主函数
  33. if __name__ == '__main__':
  34. # 创建网络模型
  35. tudui = Tudui()
  36. tudui = tudui.to(device)
  37. #小测试
  38. input = torch.ones((64,3,32,32))
  39. input = input.to(device)
  40. output = tudui(input)
  41. print(output.shape)
  42. """
  43. torch.Size([64, 10])
  44. 64是代表64张照片
  45. 10是代表10个类别,每张图片10各类别上分别的概率
  46. """
  47. # 损失函数
  48. loss_fn = nn.CrossEntropyLoss()
  49. loss_fn = loss_fn.to(device)
  50. #学习速率
  51. #1e-2=1x10^(-2)
  52. learning_rate = 1e-2
  53. # 优化器
  54. optimizer = torch.optim.SGD(tudui.parameters(),lr = learning_rate,)
  55. #设置训练网络的一些参数
  56. total_train_step = 0 #记录训练的次数
  57. total_test_step = 0 #记录测试的次数
  58. #训练的次数
  59. epoch = 10
  60. #添加tensorboard
  61. writer = SummaryWriter("logs_train")
  62. start_time = time.time() #开始训练的时间
  63. for i in range(epoch):
  64. print(f"------第{i+1}轮训练开始------")
  65. # 训练步骤开始
  66. tudui.train()
  67. for data in train_dataloader:
  68. imgs,targets = data
  69. imgs = imgs.to(device)
  70. targets = targets.to(device)
  71. outputs = tudui(imgs)
  72. loss = loss_fn(outputs,targets)
  73. #优化器的调优
  74. optimizer.zero_grad()# 梯度清零
  75. loss.backward()# 反向传播
  76. optimizer.step()# 更新优化参数
  77. total_train_step = total_train_step + 1 #训练次数加1
  78. if total_train_step % 100 == 0:# 每个一百次输出一次训练的结果
  79. end_time = time.time() #结束时间
  80. print(end_time - start_time) #计算100次训练的间隔的时间
  81. print(f"训练次数:{total_train_step},Loss:{loss.item()}") #记录每次训练的损失结果,item()主要就是把loss转化为真实的数,其实转化不转化都行的
  82. writer.add_scalar("train_loss",loss.item(),total_train_step)
  83. #测试步骤开始
  84. tudui.eval()
  85. total_test_loss = 0
  86. total_accuracy = 0
  87. with torch.no_grad():#防止调优,测试时不需要进行调优
  88. for data in test_dataloader:
  89. imgs , targets = data
  90. imgs = imgs.to(device)
  91. targets = targets.to(device)
  92. outputs = tudui(imgs)
  93. loss = loss_fn(outputs,targets)
  94. total_test_loss = total_test_loss + loss
  95. #计算整体的正确率
  96. accuracy = (outputs.argmax(1) == targets).sum()
  97. total_accuracy = total_accuracy + accuracy
  98. print(f"整体测试集上的正确率{total_accuracy/test_data_size}")
  99. print(f"整体测试集上的Loss:{total_test_loss}")
  100. writer.add_scalar("test_loss",total_test_loss,total_test_step)
  101. writer.add_scalar("test_accuracy",total_accuracy/test_data_size,total_test_step)
  102. total_test_step = total_test_step + 1
  103. #保存模型
  104. torch.save(tudui,f"tudui_{i}.ph")
  105. #torch.save(tudui.state_dict(),f"tudui_{i}.ph")
  106. print("模型已保存")
  107. writer.close()

如果没有GPU怎么办呢?

没有GPU的话,我们可以使用谷歌提供colab,可能访问这个网站的话需要进行科学上网

 19.完整的模型验证套路

利用已经训练好的模型,然后给它提供测试

  1. # -*- coding: utf-8 -*-
  2. # 作者:小土堆
  3. # 公众号:土堆碎念
  4. import torch
  5. import torchvision
  6. from PIL import Image
  7. from torch import nn
  8. image_path = "../imgs/airplane.png"
  9. image = Image.open(image_path)
  10. print(image)
  11. image = image.convert('RGB')
  12. transform = torchvision.transforms.Compose([torchvision.transforms.Resize((32, 32)),
  13. torchvision.transforms.ToTensor()])
  14. image = transform(image)
  15. print(image.shape)
  16. class Tudui(nn.Module):
  17. def __init__(self):
  18. super(Tudui, self).__init__()
  19. self.model = nn.Sequential(
  20. nn.Conv2d(3, 32, 5, 1, 2),
  21. nn.MaxPool2d(2),
  22. nn.Conv2d(32, 32, 5, 1, 2),
  23. nn.MaxPool2d(2),
  24. nn.Conv2d(32, 64, 5, 1, 2),
  25. nn.MaxPool2d(2),
  26. nn.Flatten(),
  27. nn.Linear(64*4*4, 64),
  28. nn.Linear(64, 10)
  29. )
  30. def forward(self, x):
  31. x = self.model(x)
  32. return x
  33. model = torch.load("tudui_29_gpu.pth", map_location=torch.device('cpu'))
  34. print(model)
  35. image = torch.reshape(image, (1, 3, 32, 32))
  36. model.eval()
  37. with torch.no_grad():
  38. output = model(image)
  39. print(output)
  40. print(output.argmax(1))

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/从前慢现在也慢/article/detail/360138
推荐阅读
相关标签
  

闽ICP备14008679号