dir(): open a package or module and see what is inside
help(): show the documentation ("instruction manual") for it
Run the following in the Python console:
import torch
torch.cuda.is_available()
dir(torch)
dir(torch.cuda)
dir(torch.cuda.is_available)
help(torch.cuda.is_available)  # pass the function itself, without calling it
In the command line, type jupyter notebook to open Jupyter Notebook.
import os
from torch.utils.data import Dataset
from PIL import Image


class MyData(Dataset):
    def __init__(self, root_dir, label_dir):
        self.root_dir = root_dir
        self.label_dir = label_dir
        # join the root and label directories into the image folder path
        self.path = os.path.join(self.root_dir, self.label_dir)
        # list of image file names in that folder
        self.img_path = os.listdir(self.path)

    def __getitem__(self, idx):
        img_name = self.img_path[idx]
        img_item_path = os.path.join(self.root_dir, self.label_dir, img_name)
        img = Image.open(img_item_path)
        label = self.label_dir
        return img, label

    def __len__(self):
        return len(self.img_path)


root_dir = 'hymenoptera_data/train'
ants_label_dir = 'ants'
bees_label_dir = 'bees'
bees_dataset = MyData(root_dir, bees_label_dir)
ants_dataset = MyData(root_dir, ants_label_dir)
# adding two datasets concatenates them
train_dataset = ants_dataset + bees_dataset
from torch.utils.tensorboard import SummaryWriter
writer=SummaryWriter("logs")
for i in range(100):
    writer.add_scalar("y=x", i, i)
writer.close()
View the logged charts with TensorBoard:
tensorboard --logdir=logs  # add --port=6007 to change the port
Displaying an image in TensorBoard:
from torch.utils.tensorboard import SummaryWriter
import numpy as np
from PIL import Image

writer = SummaryWriter("logs")
image_path = "hymenoptera_data/train/bees/85112639_6e860b0469.jpg"
# open the image and convert it to a numpy array
img_PIL = Image.open(image_path)
img_array = np.array(img_PIL)
print(type(img_array))
# image size
print(img_array.shape)
# add_image expects CHW by default; this array is HWC, so pass dataformats.
# The third argument is the global step at which the image is shown.
writer.add_image("test", img_array, 1, dataformats="HWC")
writer.close()
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

img_path = "hymenoptera_data/train/ants/175998972.jpg"
img = Image.open(img_path)
writer = SummaryWriter("logs")
print(img)

# 1. How to use transforms:
# create the transform object, then call it on the loaded PIL image
tensor_trans = transforms.ToTensor()
tensor_img = tensor_trans(img)
# log the tensor image to TensorBoard
writer.add_image("Tensor_img", tensor_img)
print(tensor_img)
writer.close()
from PIL import Image
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms

writer = SummaryWriter('logs')
img = Image.open("hymenoptera_data/train/ants/kurokusa.jpg")
print(img)

# ToTensor: load the image as a tensor
trans_totensor = transforms.ToTensor()
img_tensor = trans_totensor(img)
writer.add_image("ToTensor", img_tensor)

# Normalize: output = (input - mean) / std per channel
print(img_tensor[0][0][0])
trans_norm = transforms.Normalize([0.5, 5, 0.5], [9, 0.5, 0.5])
img_norm = trans_norm(img_tensor)
print(img_norm[0][0][0])  # the value changes after normalization
writer.add_image("Normalize", img_norm, 2)

# Resize
print(img.size)
transforms_resize = transforms.Resize((512, 512))
resize_img = transforms_resize(img)
resize_img = trans_totensor(resize_img)
writer.add_image("Resize", resize_img, 0)
print(resize_img)

# Compose: Resize (shorter edge to 512) followed by ToTensor
trans_resize2 = transforms.Resize(512)
transforms_compose = transforms.Compose([trans_resize2, trans_totensor])
resize_img2 = transforms_compose(img)
writer.add_image("Resize", resize_img2, 1)

# RandomCrop(500, 1000): size=500, padding=1000 (the second positional argument
# is padding; pass a tuple such as (500, 1000) to crop a 500x1000 region instead)
trans_random = transforms.RandomCrop(500, 1000)
trans_compose2 = transforms.Compose([trans_random, trans_totensor])
for i in range(10):
    img_crop = trans_compose2(img)
    writer.add_image("RandomCrop", img_crop, i)

writer.close()
import torchvision
from tensorboardX import SummaryWriter

# convert the dataset images to tensors
dataset_transform = torchvision.transforms.Compose([
    torchvision.transforms.ToTensor()
])

# S = torchvision.datasets.SBDataset("./dataset", "train", "boundaries", True)
# pass transform=dataset_transform so every image is converted on load
train_set = torchvision.datasets.CIFAR10(root="./dataset", train=True, transform=dataset_transform, download=True)
test_set = torchvision.datasets.CIFAR10(root="./dataset", train=False, transform=dataset_transform, download=True)

# print(test_set[0])
# print(test_set.classes)
#
# img, target = test_set[0]
# print(img)
# print(target)
# print(test_set.classes[target])
# img.show()
# print(test_set[0])

writer = SummaryWriter("p10")
# log the first 10 test images
for i in range(10):
    img, target = test_set[i]
    writer.add_image("test_set", img, i)

writer.close()
import torchvision
from tensorboardX import SummaryWriter
from torch.utils.data import DataLoader

test_data = torchvision.datasets.CIFAR10("dataset", False, torchvision.transforms.ToTensor())
test_loader = DataLoader(dataset=test_data, batch_size=4, shuffle=False, num_workers=0, drop_last=True)

# one image from the test dataset
img, target = test_data[0]
print(img.shape)
print(target)

writer = SummaryWriter("dataloader")
for epoch in range(2):
    step = 0
    for data in test_loader:
        imgs, targets = data
        writer.add_images("Epoch:{}".format(epoch), imgs, step)
        step = step + 1

writer.close()
import torch
import torch.nn.functional as F

input = torch.tensor([[1, 2, 0, 3, 1],
                      [0, 1, 2, 3, 2],
                      [1, 2, 1, 0, 0],
                      [5, 2, 3, 1, 1],
                      [2, 1, 0, 1, 1]])
kernel = torch.tensor([[1, 2, 1],
                       [0, 1, 0],
                       [2, 1, 0]])

# Convolution slides the 3x3 kernel over the 5x5 input; with stride 1 and no padding
# the result is a 3x3 output (with padding=1 below it becomes 5x5). For the top-left window:
# 1*1 + 2*2 + 0*1 + 0*0 + 1*1 + 2*0 + 1*2 + 2*1 + 1*0 = 10, then the kernel shifts one step right.
input = torch.reshape(input, (1, 1, 5, 5))   # (batch_size, channels, H, W)
kernel = torch.reshape(kernel, (1, 1, 3, 3))
print(input.shape)
print(kernel.shape)

# stride: step size; padding=1: pad one row/column of zeros on every side
output = F.conv2d(input, kernel, stride=1, padding=1)
print(output)
dilation enables dilated (atrous) convolution: the kernel skips one element between the positions it multiplies.
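A minimal sketch of dilation, reusing the input and kernel tensors from the snippet above; the expected single output value is worked out in the comment:

# With dilation=2 the 3x3 kernel samples every other element, so its receptive field
# covers the whole 5x5 input and the output is a single value:
# 1*1 + 2*0 + 1*1 + 0*1 + 1*1 + 0*0 + 2*2 + 1*0 + 0*1 = 7
# (.float() is used because integer convolution is not supported on every PyTorch build)
output_dilated = F.conv2d(input.float(), kernel.float(), stride=1, dilation=2)
print(output_dilated)  # tensor([[[[7.]]]])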
dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(), download=True) dataloader = DataLoader(dataset, batch_size=64) class Tudui(nn.Module): def __init__(self): super(Tudui, self).__init__() self.conv1 = Conv2d(in_channels=3, out_channels=6, kernel_size=3, stride=1, padding=0) # 多少个卷积核就有多少个输入通道 def forward(self, x): x = self.conv1(x) return x tudui = Tudui() writer = SummaryWriter("../logs") step = 0 for data in dataloader: imgs, targets = data output = tudui(imgs) print(imgs.shape) print(output.shape) # torch.Size([64, 3, 32, 32]) writer.add_images("input", imgs, step) # 6 通道的卷积核不能展示图片 需要用rehsape 转换 # torch.Size([64, 6, 30, 30]) -> [xxx, 3, 30, 30] output = torch.reshape(output, (-1, 3, 30, 30)) #-1代替size writer.add_images("output", output, step) step = step + 1
Max pooling: a 3x3 pooling kernel keeps only the largest element inside each 3x3 window.
Its purpose is to preserve the main input features while greatly reducing the amount of data.
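A minimal sketch of max pooling on the same 5x5 values used in the convolution example above, showing what ceil_mode changes; the expected outputs are worked out in the comments:

import torch
from torch.nn import MaxPool2d

# 5x5 input, 3x3 pooling kernel; the default stride equals the kernel size
x = torch.tensor([[1, 2, 0, 3, 1],
                  [0, 1, 2, 3, 2],
                  [1, 2, 1, 0, 0],
                  [5, 2, 3, 1, 1],
                  [2, 1, 0, 1, 1]], dtype=torch.float32)
x = torch.reshape(x, (1, 1, 5, 5))

# ceil_mode=True also pools the incomplete windows at the border -> [[2, 3], [5, 1]]
print(MaxPool2d(kernel_size=3, ceil_mode=True)(x))
# ceil_mode=False drops them -> [[2]]
print(MaxPool2d(kernel_size=3, ceil_mode=False)(x))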
dataset = torchvision.datasets.CIFAR10("../data", train=False, download=True, transform=torchvision.transforms.ToTensor()) dataloader = DataLoader(dataset, batch_size=64) class Tudui(nn.Module): def __init__(self): super(Tudui, self).__init__() self.maxpool1 = MaxPool2d(kernel_size=3, ceil_mode=False) // 不足池化数,是否保留池化 def forward(self, input): output = self.maxpool1(input) return output tudui = Tudui() writer = SummaryWriter("../logs_maxpool") step = 0 for data in dataloader: imgs, targets = data writer.add_images("input", imgs, step) output = tudui(imgs) writer.add_images("output", output, step) step = step + 1 writer.close()
import torch
import torchvision
from torch import nn
from torch.nn import ReLU, Sigmoid
from torch.utils.data import DataLoader
from torch.utils.tensorboard import SummaryWriter

input = torch.tensor([[1, -11],
                      [-3, 99]])
input = torch.reshape(input, (-1, 1, 2, 2))
print(input)
print(input.shape)
# ReLU sets negative values to 0 and keeps positive values unchanged
relu = ReLU()
print(relu(input))

dataset = torchvision.datasets.CIFAR10("dataset", False, torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.relu1 = ReLU()
        self.sigmoid1 = Sigmoid()

    def forward(self, input):
        output = self.sigmoid1(input)
        return output


tudui = Tudui()
writer = SummaryWriter("logs_relu")
step = 0
for data in dataloader:
    imgs, targets = data
    writer.add_images("input", imgs, global_step=step)
    output = tudui(imgs)
    writer.add_images("output", output, step)
    step += 1

writer.close()
import torch
import torchvision
from torch import nn
from torch.nn import Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("../data", train=False, transform=torchvision.transforms.ToTensor(),
                                       download=True)
# drop the last incomplete batch so the flattened size is always 196608
dataloader = DataLoader(dataset, batch_size=64, drop_last=True)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        # a batch of 64 CIFAR10 images flattened: 64 * 3 * 32 * 32 = 196608 inputs
        self.linear1 = Linear(196608, 10)

    def forward(self, input):
        output = self.linear1(input)
        return output


tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    print(imgs.shape)             # torch.Size([64, 3, 32, 32])
    output = torch.flatten(imgs)  # flatten the whole batch into one vector
    print(output.shape)           # torch.Size([196608])
    output = tudui(output)
    print(output.shape)           # torch.Size([10])
import torch
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.tensorboard import SummaryWriter


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            # after three 2x2 poolings a 32x32 image becomes 4x4 with 64 channels,
            # so the flattened size is 64 * 4 * 4 = 1024
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


tudui = Tudui()
print(tudui)
input = torch.ones((64, 3, 32, 32))
output = tudui(input)
print(output)

writer = SummaryWriter("log_seq")
# visualize the network structure as a graph
writer.add_graph(tudui, input)
writer.close()
L1Loss: (|1-1| + |2-2| + |5-3|) / 3 = 0.667
MSELoss: (0 + 0 + 2²) / 3 = 1.333
CrossEntropyLoss
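For the x and y used in the code below, CrossEntropyLoss computes -x[class] + log(Σⱼ exp(x[j])), so with x = [0.1, 0.2, 0.3] and class 1: -0.2 + log(e^0.1 + e^0.2 + e^0.3) ≈ 1.1019.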
import torch
from torch import nn
from torch.nn import L1Loss

inputs = torch.tensor([1, 2, 3], dtype=torch.float32)
targets = torch.tensor([1, 2, 5], dtype=torch.float32)
inputs = torch.reshape(inputs, (1, 1, 1, 3))
targets = torch.reshape(targets, (1, 1, 1, 3))

# sum of absolute differences
loss = L1Loss(reduction='sum')
result = loss(inputs, targets)

# mean of squared differences
loss_mse = nn.MSELoss()
result_mse = loss_mse(inputs, targets)

print(result)      # tensor(2.)
print(result_mse)  # tensor(1.3333)

x = torch.tensor([0.1, 0.2, 0.3])
y = torch.tensor([1])
x = torch.reshape(x, (1, 3))
loss_cross = nn.CrossEntropyLoss()
result_cross = loss_cross(x, y)
print(result_cross)  # tensor(1.1019)
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("dataset", False, torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=1)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()
tudui = Tudui()
for data in dataloader:
    imgs, targets = data
    output = tudui(imgs)
    print(output)
    print(targets)
    result_loss = loss(output, targets)
    # backward() computes the gradients that an optimizer would use to update the weights
    result_loss.backward()
    print(result_loss)
import torch
import torchvision
from torch import nn
from torch.nn import Sequential, Conv2d, MaxPool2d, Flatten, Linear
from torch.utils.data import DataLoader

dataset = torchvision.datasets.CIFAR10("dataset", False, torchvision.transforms.ToTensor(), download=True)
dataloader = DataLoader(dataset, batch_size=1)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model1 = Sequential(
            Conv2d(3, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 32, 5, padding=2),
            MaxPool2d(2),
            Conv2d(32, 64, 5, padding=2),
            MaxPool2d(2),
            Flatten(),
            Linear(1024, 64),
            Linear(64, 10)
        )

    def forward(self, x):
        x = self.model1(x)
        return x


loss = nn.CrossEntropyLoss()
tudui = Tudui()
optim = torch.optim.SGD(tudui.parameters(), lr=0.01)
for epoch in range(20):
    running_loss = 0.0
    for data in dataloader:
        imgs, targets = data
        outputs = tudui(imgs)
        result_loss = loss(outputs, targets)
        # clear old gradients, backpropagate, then let the optimizer update the parameters
        optim.zero_grad()
        result_loss.backward()
        optim.step()
        running_loss = running_loss + result_loss
    print(running_loss)
import torchvision
from torch import nn

vgg16_false = torchvision.models.vgg16(pretrained=False)
vgg16_true = torchvision.models.vgg16(pretrained=True)
print(vgg16_true)
train_data = torchvision.datasets.CIFAR10('../data', train=True, transform=torchvision.transforms.ToTensor(), download=True)
# add a linear layer that maps VGG16's 1000 ImageNet outputs to CIFAR10's 10 classes
vgg16_true.classifier.add_module('add_linear', nn.Linear(1000, 10))
print(vgg16_true)
print(vgg16_false)
vgg16_false.classifier[6] = nn.Linear(4096, 10)
print(vgg16_false)
Saving a model:
import torch
import torchvision
from torch import nn

vgg16 = torchvision.models.vgg16(pretrained=False)

# saving method 1: model structure + model parameters
torch.save(vgg16, "vgg16_method1.pth")

# saving method 2: parameters only (officially recommended)
torch.save(vgg16.state_dict(), "vgg16_method2.pth")


# pitfall: a custom model saved with method 1 needs its class definition when loading
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)

    def forward(self, x):
        x = self.conv1(x)
        return x


tudui = Tudui()
torch.save(tudui, "tudui_method1.pth")
Loading a model:
import torch
import torchvision
from torch import nn

# method 1: load structure + parameters directly
model = torch.load("vgg16_method1.pth")
# print(model)

# method 2: load the state dict into a freshly created model
# model = torch.load("vgg16_method2.pth")
# print(model)
vgg16 = torchvision.models.vgg16(pretrained=False)
vgg16.load_state_dict(torch.load("vgg16_method2.pth"))
# print(vgg16)


# pitfall: the Tudui class must still be defined (no instance is needed);
# it is only there so torch.load knows what class the saved object is
class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=3)

    def forward(self, x):
        x = self.conv1(x)
        return x


model_tudui = torch.load("tudui_method1.pth")
# print(model_tudui)

# if a custom model is saved with method 2, torch.load only returns a state dict,
# so the vgg16-style direct loading cannot be used
model_tudui2 = torch.load("tudui_method2.pth")
print(model_tudui2)
import time

import torch
import torchvision
from tensorboardX import SummaryWriter
from torch import nn
from torch.utils.data import DataLoader
# from model import *

device = torch.device("cuda")

train_data = torchvision.datasets.CIFAR10(root="dataset", train=True,
                                          transform=torchvision.transforms.ToTensor(), download=True)
test_data = torchvision.datasets.CIFAR10(root="dataset", train=False,
                                         transform=torchvision.transforms.ToTensor(), download=True)

train_data_size = len(train_data)
test_data_size = len(test_data)
print("Size of the training set: {}".format(train_data_size))
print("Size of the test set: {}".format(test_data_size))

# load the datasets
train_dataloader = DataLoader(train_data, batch_size=64)
test_dataloader = DataLoader(test_data, batch_size=64)


class Tudui(nn.Module):
    def __init__(self):
        super(Tudui, self).__init__()
        self.model = nn.Sequential(
            nn.Conv2d(3, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 32, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Conv2d(32, 64, 5, 1, 2),
            nn.MaxPool2d(2),
            nn.Flatten(),
            nn.Linear(1024, 64),
            nn.Linear(64, 10)
        )

    def forward(self, x):
        x = self.model(x)
        return x


# create the network model
tudui = Tudui()
# # with the .cuda() approach, every one of these calls needs the availability check
# if torch.cuda.is_available():
#     tudui = tudui.cuda()
tudui = tudui.to(device)

# loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.to(device)

# optimizer
# learning_rate = 0.01
learning_rate = 1e-2
optimizer = torch.optim.SGD(tudui.parameters(), lr=learning_rate)

# training settings
# number of training steps
total_train_step = 0
# number of test steps
total_test_step = 0
# number of epochs
epoch = 10

# add TensorBoard
writer = SummaryWriter("logs_train")
start_time = time.time()

for i in range(epoch):
    print("Epoch {} starts".format(i + 1))

    # training phase
    tudui.train()
    for data in train_dataloader:
        imgs, targets = data
        imgs = imgs.to(device)
        targets = targets.to(device)
        outputs = tudui(imgs)
        loss = loss_fn(outputs, targets)

        # let the optimizer update the model
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        total_train_step = total_train_step + 1
        if total_train_step % 100 == 0:
            end_time = time.time()
            print(end_time - start_time)
            print("Train step: {}, loss: {}".format(total_train_step, loss.item()))
            writer.add_scalar("train_loss", loss.item(), total_train_step)

    # evaluation phase
    tudui.eval()
    total_test_loss = 0
    # number of correct predictions over the whole test set
    total_accuracy = 0
    with torch.no_grad():
        for data in test_dataloader:
            imgs, targets = data
            imgs = imgs.to(device)
            targets = targets.to(device)
            outputs = tudui(imgs)
            loss = loss_fn(outputs, targets)
            total_test_loss = total_test_loss + loss
            accuracy = (outputs.argmax(1) == targets).sum()
            total_accuracy = total_accuracy + accuracy

    print("Loss on the whole test set: {}".format(total_test_loss))
    print("Accuracy on the whole test set: {}".format(total_accuracy / test_data_size))
    writer.add_scalar("test_loss", total_test_loss, total_test_step)
    writer.add_scalar("test_accuracy", total_accuracy / test_data_size, total_test_step)
    total_test_step = total_test_step + 1

    torch.save(tudui, "tudui_{}.pth".format(i))
    print("Model saved")

writer.close()
GPU usage, method 1: call .cuda() on the network, the loss function, and the data.

# create the network model
tudui = Tudui()
# with this approach, the availability check has to be added around every .cuda() call
if torch.cuda.is_available():
    tudui = tudui.cuda()

# loss function
loss_fn = nn.CrossEntropyLoss()
loss_fn = loss_fn.cuda()

# training phase
tudui.train()
for data in train_dataloader:
    imgs, targets = data
    imgs = imgs.cuda()
    targets = targets.cuda()
GPU usage, method 2: define a device and move everything to it with .to(device):
device=torch.device("cuda")
# create the network model
tudui=Tudui()
tudui=tudui.to(device)
loss_fn = loss_fn.to(device)
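Inside the training and test loops, the data has to be moved to the same device as well, as in the full script above:

imgs = imgs.to(device)
targets = targets.to(device)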
img_path="img_1.png" img=Image.open(img_path) print(img) img = img.convert('RGB') transform=torchvision.transforms.Compose([torchvision.transforms.Resize((32,32)), torchvision.transforms.ToTensor()]) img=transform(img) print(img.shape) class Tudui(nn.Module): def __init__(self): super(Tudui, self).__init__() self.model=nn.Sequential( nn.Conv2d(3,32,5,1,2), nn.MaxPool2d(2), nn.Conv2d(32,32,5,1,2), nn.MaxPool2d(2), nn.Conv2d(32,64,5,1,2), nn.MaxPool2d(2), nn.Flatten(), nn.Linear(1024,64), nn.Linear(64,10) ) def forward(self,x): x=self.model(x) return x # model = torch.load("tudui_2.pth",map_location=torch.device('cpu')) model = torch.load("tudui_1.pth") print(model) image=torch.reshape(img,(1,3,32,32)) model.eval() with torch.no_grad(): output=model(image) print(output) print(output.argmax(1))