Residual network (ResNet): the output of an earlier layer skips over several layers and is fed directly into the input of a later layer. This means that part of a later feature layer's content is contributed linearly by an earlier layer. The structure is shown in the figure below:
ResNet addresses the degradation problem of deep networks: with the ResNet structure, deeper networks perform better. As Figure 1 shows, a plain 56-layer network actually has higher training and test error than a 20-layer one; this is the degradation problem. Figure 2 compares the training results of 34-layer and 18-layer networks with and without the ResNet structure.
Suppose the mapping we want a network block to learn is H(x). Learning H(x) directly is hard, but learning the residual function F(x) = H(x) - x is easy, because the block's training target is to drive F(x) toward 0 rather than toward some specific mapping. The final mapping H(x) is then obtained by adding F(x) and x, i.e. H(x) = F(x) + x, as shown in the figure.
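To make this concrete, here is a minimal PyTorch sketch (my own illustration, not code from the original post) of a residual block with an identity shortcut; the two-convolution body used for F(x) and the channel count are assumptions for demonstration:

import torch
import torch.nn as nn
from torch.nn import functional as F

class ResidualBlock(nn.Module):
    # Learns the residual F(x); the block output is H(x) = F(x) + x.
    def __init__(self, channels):
        super().__init__()
        # F(x): two 3x3 convolutions that preserve channels and spatial size
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)

    def forward(self, x):
        out = F.relu(self.conv1(x))
        out = self.conv2(out)   # F(x)
        return F.relu(out + x)  # H(x) = F(x) + x

x = torch.randn(1, 64, 56, 56)
print(ResidualBlock(64)(x).shape)  # torch.Size([1, 64, 56, 56])

If the convolution weights are driven toward zero, F(x) vanishes and the block degenerates to the identity mapping, which is exactly why pushing F(x) toward 0 is an easy target.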
Therefore, the output y of this network block is

y = F(x, {W_i}) + x

For the addition to be valid, the dimensions on both sides of the plus sign must match, so the general form is written as

y = F(x, {W_i}) + W_s x

where W_s is a linear projection used to match dimensions.
The paper mentions two ways to match dimensions: (A) increase the dimension with zero-padding; (B) increase the dimension with a 1×1 convolution.
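A minimal sketch of both options (my own illustration; the channel counts 64 to 128 are assumed for demonstration):

import torch
import torch.nn as nn
from torch.nn import functional as F

x = torch.randn(1, 64, 28, 28)

# (A) zero-padding: extend the channel dimension from 64 to 128 with zeros (no parameters)
shortcut_a = F.pad(x, (0, 0, 0, 0, 0, 64))  # pad order (W, H, C); append 64 zero channels
print(shortcut_a.shape)  # torch.Size([1, 128, 28, 28])

# (B) 1x1 convolution: a learned projection W_s from 64 to 128 channels
proj = nn.Conv2d(64, 128, kernel_size=1)
shortcut_b = proj(x)
print(shortcut_b.shape)  # torch.Size([1, 128, 28, 28])

Option (A) adds no parameters, while option (B) learns the projection W_s from the general form above.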
ResNet uses two kinds of building blocks, BasicBlock and Bottleneck; the former is used in ResNet-34 and shallower networks, the latter in ResNet-50 and deeper networks.
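For reference, a sketch of the two block bodies (my own illustration; channel counts are typical for the first stage, and the shortcut and downsampling logic are omitted for brevity). BasicBlock stacks two 3x3 convolutions, while Bottleneck reduces channels with a 1x1 convolution, applies a 3x3 convolution, then expands channels again with another 1x1 convolution; resnet50.py below implements the bottleneck variant:

import torch.nn as nn

# BasicBlock body (ResNet-18/34): two 3x3 convs, channel count unchanged
basic = nn.Sequential(
    nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
    nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64),
)

# Bottleneck body (ResNet-50 and deeper): 1x1 reduce -> 3x3 -> 1x1 expand
bottleneck = nn.Sequential(
    nn.Conv2d(256, 64, kernel_size=1), nn.BatchNorm2d(64), nn.ReLU(),
    nn.Conv2d(64, 64, kernel_size=3, padding=1), nn.BatchNorm2d(64), nn.ReLU(),
    nn.Conv2d(64, 256, kernel_size=1), nn.BatchNorm2d(256),
)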
The implementation below is built from two kinds of blocks, the Conv Block and the Identity Block. The Conv Block's input and output dimensions differ, so Conv Blocks cannot be chained directly; they are used to change the network's dimensions. The Identity Block's input and output dimensions are the same, so Identity Blocks can be chained to deepen the network.
Conv Block
Identity Block
The overall structure is as follows:
resnet50.py
import torch
import torch.nn as nn
from torch.nn import functional as F


class ResNet50BasicBlock(nn.Module):
    """Identity Block: input and output dimensions match, so the input x is added directly."""

    def __init__(self, in_channel, outs, kernel_size, stride, padding):
        super(ResNet50BasicBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, outs[0], kernel_size=kernel_size[0], stride=stride[0], padding=padding[0])
        self.bn1 = nn.BatchNorm2d(outs[0])
        self.conv2 = nn.Conv2d(outs[0], outs[1], kernel_size=kernel_size[1], stride=stride[1], padding=padding[1])
        self.bn2 = nn.BatchNorm2d(outs[1])
        self.conv3 = nn.Conv2d(outs[1], outs[2], kernel_size=kernel_size[2], stride=stride[2], padding=padding[2])
        self.bn3 = nn.BatchNorm2d(outs[2])

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu(self.bn1(out))

        out = self.conv2(out)
        out = F.relu(self.bn2(out))

        out = self.conv3(out)
        out = self.bn3(out)

        # identity shortcut: H(x) = F(x) + x
        return F.relu(out + x)


class ResNet50DownBlock(nn.Module):
    """Conv Block: input and output dimensions differ; a 1x1 conv on the shortcut matches them."""

    def __init__(self, in_channel, outs, kernel_size, stride, padding):
        super(ResNet50DownBlock, self).__init__()
        self.conv1 = nn.Conv2d(in_channel, outs[0], kernel_size=kernel_size[0], stride=stride[0], padding=padding[0])
        self.bn1 = nn.BatchNorm2d(outs[0])
        self.conv2 = nn.Conv2d(outs[0], outs[1], kernel_size=kernel_size[1], stride=stride[1], padding=padding[1])
        self.bn2 = nn.BatchNorm2d(outs[1])
        self.conv3 = nn.Conv2d(outs[1], outs[2], kernel_size=kernel_size[2], stride=stride[2], padding=padding[2])
        self.bn3 = nn.BatchNorm2d(outs[2])

        # projection shortcut (W_s): 1x1 conv + BN to match channels and spatial size
        self.extra = nn.Sequential(
            nn.Conv2d(in_channel, outs[2], kernel_size=1, stride=stride[3], padding=0),
            nn.BatchNorm2d(outs[2])
        )

    def forward(self, x):
        x_shortcut = self.extra(x)
        out = self.conv1(x)
        out = self.bn1(out)
        out = F.relu(out)

        out = self.conv2(out)
        out = self.bn2(out)
        out = F.relu(out)

        out = self.conv3(out)
        out = self.bn3(out)
        return F.relu(x_shortcut + out)


class ResNet50(nn.Module):
    def __init__(self):
        super(ResNet50, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)

        self.layer1 = nn.Sequential(
            ResNet50DownBlock(64, outs=[64, 64, 256], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1], padding=[0, 1, 0]),
            ResNet50BasicBlock(256, outs=[64, 64, 256], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1], padding=[0, 1, 0]),
            ResNet50BasicBlock(256, outs=[64, 64, 256], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1], padding=[0, 1, 0]),
        )

        self.layer2 = nn.Sequential(
            ResNet50DownBlock(256, outs=[128, 128, 512], kernel_size=[1, 3, 1], stride=[1, 2, 1, 2], padding=[0, 1, 0]),
            ResNet50BasicBlock(512, outs=[128, 128, 512], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1], padding=[0, 1, 0]),
            ResNet50BasicBlock(512, outs=[128, 128, 512], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1], padding=[0, 1, 0]),
            ResNet50DownBlock(512, outs=[128, 128, 512], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1], padding=[0, 1, 0])
        )

        self.layer3 = nn.Sequential(
            ResNet50DownBlock(512, outs=[256, 256, 1024], kernel_size=[1, 3, 1], stride=[1, 2, 1, 2], padding=[0, 1, 0]),
            ResNet50BasicBlock(1024, outs=[256, 256, 1024], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1],
                               padding=[0, 1, 0]),
            ResNet50BasicBlock(1024, outs=[256, 256, 1024], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1],
                               padding=[0, 1, 0]),
            ResNet50DownBlock(1024, outs=[256, 256, 1024], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1],
                              padding=[0, 1, 0]),
            ResNet50DownBlock(1024, outs=[256, 256, 1024], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1],
                              padding=[0, 1, 0]),
            ResNet50DownBlock(1024, outs=[256, 256, 1024], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1],
                              padding=[0, 1, 0])
        )

        self.layer4 = nn.Sequential(
            ResNet50DownBlock(1024, outs=[512, 512, 2048], kernel_size=[1, 3, 1], stride=[1, 2, 1, 2],
                              padding=[0, 1, 0]),
            ResNet50DownBlock(2048, outs=[512, 512, 2048], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1],
                              padding=[0, 1, 0]),
            ResNet50DownBlock(2048, outs=[512, 512, 2048], kernel_size=[1, 3, 1], stride=[1, 1, 1, 1],
                              padding=[0, 1, 0])
        )

        self.avgpool = nn.AdaptiveAvgPool2d(output_size=(1, 1))

        self.fc = nn.Linear(2048, 10)

    def forward(self, x):
        out = self.conv1(x)
        out = self.maxpool(out)
        out = self.layer1(out)
        out = self.layer2(out)
        out = self.layer3(out)
        out = self.layer4(out)
        out = self.avgpool(out)
        out = out.reshape(x.shape[0], -1)
        out = self.fc(out)
        return out


if __name__ == '__main__':
    x = torch.randn(2, 3, 224, 224)
    net = ResNet50()
    out = net(x)
    print('out.shape: ', out.shape)
    print(out)
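As a quick sanity check (my own addition, not in the original post), forward hooks can print the feature-map shape after each stage; the shapes in the comments assume a 224x224 input:

import torch
from resnet50 import ResNet50

net = ResNet50()
for name in ['maxpool', 'layer1', 'layer2', 'layer3', 'layer4']:
    # print the output shape of each stage as the input flows through
    getattr(net, name).register_forward_hook(
        lambda m, inp, out, name=name: print(name, tuple(out.shape)))

net(torch.randn(1, 3, 224, 224))
# maxpool (1, 64, 56, 56)
# layer1 (1, 256, 56, 56)
# layer2 (1, 512, 28, 28)
# layer3 (1, 1024, 14, 14)
# layer4 (1, 2048, 7, 7)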
main.py
import torch
from torch import nn, optim
import torchvision.transforms as transforms
from torchvision import datasets
from torch.utils.data import DataLoader
from resnet50 import ResNet50


# Experiment on the CIFAR-10 dataset

def main():
    batchsz = 128

    # CIFAR-10 images are already 32x32; Resize is kept for clarity.
    # The normalization statistics are the standard ImageNet mean/std.
    cifar_train = datasets.CIFAR10('cifar', True, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ]), download=True)
    cifar_train = DataLoader(cifar_train, batch_size=batchsz, shuffle=True)

    cifar_test = datasets.CIFAR10('cifar', False, transform=transforms.Compose([
        transforms.Resize((32, 32)),
        transforms.ToTensor(),
        transforms.Normalize(mean=[0.485, 0.456, 0.406],
                             std=[0.229, 0.224, 0.225])
    ]), download=True)
    cifar_test = DataLoader(cifar_test, batch_size=batchsz, shuffle=True)

    x, label = next(iter(cifar_train))
    print('x:', x.shape, 'label:', label.shape)

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    model = ResNet50().to(device)

    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = optim.Adam(model.parameters(), lr=1e-3)

    for epoch in range(1000):

        model.train()
        for batchidx, (x, label) in enumerate(cifar_train):
            # x: [b, 3, 32, 32], label: [b]
            x, label = x.to(device), label.to(device)

            logits = model(x)
            # logits: [b, 10], label: [b], loss: scalar tensor
            loss = criterion(logits, label)

            # backprop
            optimizer.zero_grad()
            loss.backward()
            optimizer.step()

        print(epoch, 'loss:', loss.item())

        model.eval()
        with torch.no_grad():
            # test
            total_correct = 0
            total_num = 0
            for x, label in cifar_test:
                # x: [b, 3, 32, 32], label: [b]
                x, label = x.to(device), label.to(device)

                # logits: [b, 10]
                logits = model(x)
                # pred: [b]
                pred = logits.argmax(dim=1)
                # [b] vs [b] => scalar
                correct = torch.eq(pred, label).float().sum().item()
                total_correct += correct
                total_num += x.size(0)

            acc = total_correct / total_num
            print(epoch, 'test acc:', acc)


if __name__ == '__main__':
    main()
After training for 100 epochs (numbered 0 through 99), the final test accuracy was 0.7841.