```python
import torch
from torch import nn


class Lenet5(nn.Module):
    """LeNet-5-style network for CIFAR-10 (3x32x32 input, 10 classes)."""

    def __init__(self):
        super(Lenet5, self).__init__()
        # [b, 3, 32, 32] -> [b, 16, 28, 28] -> [b, 16, 14, 14]
        #                -> [b, 32, 10, 10] -> [b, 32, 5, 5]
        self.conv_unit = nn.Sequential(
            nn.Conv2d(3, 16, kernel_size=5, stride=1, padding=0),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
            nn.Conv2d(16, 32, kernel_size=5, stride=1, padding=0),
            nn.MaxPool2d(kernel_size=2, stride=2, padding=0),
        )
        # Flattened conv output (32*5*5) -> hidden layer -> 10 logits
        self.fc_unit = nn.Sequential(
            nn.Linear(32 * 5 * 5, 32),
            nn.ReLU(),
            nn.Linear(32, 10)
        )

    def forward(self, x):
        batchsz = x.size(0)
        out = self.conv_unit(x)
        out = out.view(batchsz, -1)   # flatten to [b, 32*5*5]
        out = self.fc_unit(out)       # logits, [b, 10]
        return out


def main():
    net = Lenet5()
    tmp = torch.randn(2, 3, 32, 32)
    out = net(tmp)
    print('lenet out:', out.shape)


if __name__ == '__main__':
    main()
```
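Where the `32*5*5` in `fc_unit` comes from can be checked directly; a minimal sketch that just prints the conv output shape:

```python
# Quick shape check for the flatten dimension feeding fc_unit.
net = Lenet5()
x = torch.randn(2, 3, 32, 32)
print(net.conv_unit(x).shape)  # torch.Size([2, 32, 5, 5]) -> 32*5*5 = 800 features
```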
```python
import torch
from torch import nn
from torch.nn import functional as F


class ResBlk(nn.Module):
    """Basic residual block: two 3x3 convs plus a (possibly projected) shortcut."""

    def __init__(self, ch_in, ch_out, stride=1):
        super(ResBlk, self).__init__()
        self.conv1 = nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=stride, padding=1)
        self.bn1 = nn.BatchNorm2d(ch_out)
        self.conv2 = nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(ch_out)

        # The shortcut must match the main path in both channels and spatial size,
        # so project with a 1x1 conv whenever either of them changes.
        self.extra = nn.Sequential()
        if ch_in != ch_out or stride != 1:
            self.extra = nn.Sequential(
                nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=stride),
                nn.BatchNorm2d(ch_out)
            )

    def forward(self, x):
        out = F.relu(self.bn1(self.conv1(x)))
        out = self.bn2(self.conv2(out))
        out = out + self.extra(x)   # residual connection
        out = F.relu(out)
        return out


class ResNet18(nn.Module):
    def __init__(self):
        super(ResNet18, self).__init__()
        # [b, 3, 32, 32] -> [b, 64, 10, 10]
        self.conv_first = nn.Sequential(
            nn.Conv2d(3, 64, kernel_size=3, stride=3, padding=0),
            nn.BatchNorm2d(64)
        )
        self.BLK1 = ResBlk(64, 128, stride=2)    # -> [b, 128, 5, 5]
        self.BLK2 = ResBlk(128, 256, stride=2)   # -> [b, 256, 3, 3]
        self.BLK3 = ResBlk(256, 512, stride=2)   # -> [b, 512, 2, 2]
        self.BLK4 = ResBlk(512, 512, stride=2)   # -> [b, 512, 1, 1]
        self.outlayer = nn.Linear(512 * 1 * 1, 10)

    def forward(self, x):
        out = F.relu(self.conv_first(x))
        out = self.BLK1(out)
        out = self.BLK2(out)
        out = self.BLK3(out)
        out = self.BLK4(out)
        out = F.adaptive_avg_pool2d(out, [1, 1])  # global average pool to [b, 512, 1, 1]
        out = out.view(x.size(0), -1)
        out = self.outlayer(out)
        return out


def main():
    blk = ResBlk(64, 128, stride=4)
    tmp = torch.randn(2, 64, 32, 32)
    out = blk(tmp)
    print('block:', out.shape)

    x = torch.randn(2, 3, 32, 32)
    model = ResNet18()
    out = model(x)
    print('resnet:', out.shape)


if __name__ == '__main__':
    main()
```
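Note the shortcut condition covers stride as well as channels: `BLK4` keeps 512 channels but still halves the spatial size, so its identity branch would not line up with the main path without the 1x1-conv projection. A quick check of that case:

```python
# ResBlk with unchanged channel count but stride 2: the 1x1-conv shortcut
# must downsample the identity branch so the addition lines up.
blk = ResBlk(512, 512, stride=2)
x = torch.randn(2, 512, 2, 2)
print(blk(x).shape)  # torch.Size([2, 512, 1, 1])
```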
```python
import torch
from torch.utils.data import DataLoader
from torchvision import datasets, transforms
from torch import nn
from torch import optim

from lenet5 import Lenet5  # plain import: this script is run directly, not as a package


def main():
    # Load the CIFAR-10 dataset.
    batchsz = 128
    cifar_train = datasets.CIFAR10("cifar", True, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])  # ImageNet statistics
    ]), download=True)
    cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)

    cifar_test = datasets.CIFAR10("cifar", False, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ]), download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=False)  # no need to shuffle the test set

    x, label = next(iter(cifar_train))
    print("x:", x.shape, "label:", label.shape)

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    model = Lenet5().to(device)
    criterion = nn.CrossEntropyLoss().to(device)  # note the (): instantiate the loss
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    for epoch in range(1000):
        model.train()
        for batch_idx, (x, label) in enumerate(cifar_train):
            x, label = x.to(device), label.to(device)
            logits = model(x)
            loss = criterion(logits, label)

            optimizer.zero_grad()
            loss.backward()
            optimizer.step()
        print(epoch, "loss:", loss.item())

        model.eval()
        with torch.no_grad():  # no gradients needed during evaluation
            total_correct = 0
            total_num = 0
            for x, label in cifar_test:
                x, label = x.to(device), label.to(device)  # test data must be on the same device
                logits = model(x)
                pred = logits.argmax(dim=1)
                total_correct += torch.eq(pred, label).float().sum().item()
                total_num += x.size(0)
            acc = total_correct / total_num
            print(epoch, "acc:", acc)


if __name__ == '__main__':
    main()
```
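The same loop also trains the ResNet18 defined above; a minimal sketch, assuming the ResNet18 class is saved as `resnet.py` next to this script (the filename is an assumption):

```python
# Hypothetical swap: train ResNet18 instead of Lenet5 (assumes resnet.py defines it).
from resnet import ResNet18

model = ResNet18().to(device)  # everything else in the training loop stays the same
```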
Drawback of using only linear layers: every time step of a sequence would need its own weight matrix, so the parameter count grows with sequence length and nothing is shared across positions.
A naive RNN instead shares its weights across time steps: it has only two weight matrices, $W_{ih}$ and $W_{hh}$, applied at every step as $h_t = \tanh(W_{ih} x_t + W_{hh} h_{t-1})$.
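A quick way to see this in PyTorch (a minimal sketch): a single-layer `nn.RNN` exposes exactly these two matrices as `weight_ih_l0` and `weight_hh_l0`, and they are reused at every time step regardless of sequence length.

```python
import torch
from torch import nn

# Single-layer naive RNN: h_t = tanh(W_ih @ x_t + b_ih + W_hh @ h_{t-1} + b_hh)
rnn = nn.RNN(input_size=100, hidden_size=20, num_layers=1)

# The only two weight matrices, shared across all time steps:
print(rnn.weight_ih_l0.shape)  # torch.Size([20, 100])  -- W_ih
print(rnn.weight_hh_l0.shape)  # torch.Size([20, 20])   -- W_hh

# Sequence length does not change the parameter count:
x = torch.randn(10, 3, 100)    # [seq_len, batch, input_size]
out, h = rnn(x)
print(out.shape, h.shape)      # torch.Size([10, 3, 20]) torch.Size([1, 3, 20])
```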