Below is one of the model parameter files (one per epoch) that I saved during training; it is a .pth file.
import torch
torch.save(model.state_dict(), path)
# path is the location where the parameters are saved
# state_dict() stores the parameters as a dictionary
path="./model-27.pth"#这是我训练好的参数文件
state_dict=torch.load(path,map_location='cpu')
model_r=resnet34()#这个resnet34模型也得有
model_r.load_state_dict(state_dict)#取的是最后一层的weights
model=model_r.eval()# 一定要有这行,不然运算速度会变慢(要求梯度)而且会影响结果
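One clarification: model.eval() only changes layer behavior (BatchNorm uses its running statistics, Dropout is disabled). Gradient tracking is controlled separately; to actually skip it during inference, wrap the forward pass in torch.no_grad(), as in this small sketch (it assumes a 4D input tensor prepared as in extract_feature further below):

# Sketch: inference without gradient tracking
with torch.no_grad():       # disables autograd bookkeeping, saving memory and time
    output = model(tensor)  # eval() alone does not turn gradients off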
Here is the resnet34 definition I use (the contents of my resNet file):

import torch.nn as nn
import math
import torch.utils.model_zoo as model_zoo
import torch.nn.functional as F


def conv3x3(in_planes, out_planes, stride=1):
    "3x3 convolution with padding"
    return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,
                     padding=1, bias=False)


class BasicBlock(nn.Module):
    expansion = 1

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class Bottleneck(nn.Module):
    expansion = 4

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, stride=stride, bias=False)  # change
        self.bn1 = nn.BatchNorm2d(planes)
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=1,  # change
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        out += residual
        out = self.relu(out)
        return out


class ResNet(nn.Module):

    def __init__(self, block, layers, num_classes=2):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=0, ceil_mode=True)  # change
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)

        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = F.adaptive_avg_pool2d(x, 1)
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x


# Official torchvision weight URLs, needed only when pretrained=True
# (note: with num_classes=2 the fc shape differs from the ImageNet checkpoint)
model_urls = {
    'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
    'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
}


def resnet18(pretrained=False):
    """Constructs a ResNet-18 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2])
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet18']))
    return model


def resnet34(pretrained=False):
    """Constructs a ResNet-34 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3])
    return model
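A quick sanity check I find useful (a sketch, not part of the original post): feed a dummy batch through resnet34 and confirm the output shape matches num_classes=2.

# Sketch: sanity-check the network definition with a random input
import torch
net = resnet34()
dummy = torch.randn(1, 3, 224, 224)  # batch of one 3-channel 224x224 image
out = net(dummy)
print(out.shape)                     # expected: torch.Size([1, 2]) since num_classes=2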
Loading the trained model and extracting features:

import os
import numpy as np
import torch
import torch.nn
import torch.cuda
import torchvision.models as models
import torchvision.transforms as transforms
from torch.autograd import Variable
from PIL import Image

TARGET_IMG_SIZE = 224
img_to_tensor = transforms.ToTensor()


def make_model():
    path = "./model-27.pth"  # my trained parameter file
    state_dict = torch.load(path, map_location='cpu')
    model_r = resnet34()  # the resnet34 definition above must be available
    model_r.load_state_dict(state_dict)  # load the saved weights into the model
    model = model_r.eval()  # required: switches BatchNorm/Dropout to inference mode, otherwise the results are affected
    # model.cuda()  # move the model from CPU to GPU; remove this line if you have no GPU
    return model


# feature extraction
def extract_feature(model, imgpath):
    model.eval()  # required, otherwise the extracted features will be affected
    img = Image.open(imgpath)  # read the image
    img = img.resize((TARGET_IMG_SIZE, TARGET_IMG_SIZE))
    tensor = img_to_tensor(img)  # convert the image to a tensor
    tensor = torch.unsqueeze(tensor, dim=0)  # add a batch dimension: the resNet file expects 4D input, my image tensor is 3D
    # tensor = tensor.cuda()  # remove this line if you run on CPU only
    result = model(Variable(tensor))
    result_npy = result.data.cpu().numpy()  # remember to move the result to CPU before saving, otherwise it may fail
    return result_npy[0]  # the output shape is [1, num_classes]; indexing with 0 drops the batch dimension
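Note that extract_feature returns the final classifier output (size num_classes). If you want convolutional feature maps instead, one option is a forward hook on an intermediate layer; this is a sketch under the assumption that the layer4 output is what you are after, not something from my original code:

# Sketch: grab the layer4 feature maps instead of the classifier logits (assumed requirement)
feats = {}

def hook(module, inp, out):
    feats['layer4'] = out.detach()  # e.g. shape [1, 512, 7, 7] for a 224x224 input

handle = model.layer4.register_forward_hook(hook)
with torch.no_grad():
    _ = model(tensor)               # `tensor` prepared as in extract_feature
handle.remove()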
My images are single-channel grayscale images, so before feature extraction they have to be converted to three-channel images (see the sketch below). Always check how many channels your input images have and what input shape your model expects, and preprocess your images accordingly.
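For example, a minimal way to do the conversion with PIL (a sketch; imgpath is a placeholder for one of my grayscale image paths):

# Sketch: convert a single-channel grayscale image to 3 channels before feeding the network
img = Image.open(imgpath)
if img.mode != 'RGB':
    img = img.convert('RGB')  # replicates the single channel into R, G and B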
Batch feature extraction over my image directory:

from PIL import Image
import imtools  # helper that provides get_imlist (lists the image paths in a directory)

model = make_model()
feature = []
imlist = imtools.get_imlist('./data_3c')  # get the path of every image
imnbr = len(imlist)
for i in range(imnbr):
    tmp = extract_feature(model, imlist[i])
    feature.append(tmp)

'''Output of feature: since the final number of classes in my resNet file is 2,
each image produces an array of size 2:
[array([-0.23673996, 0.14585349], dtype=float32), array([-1.0535352, 1.5139551], dtype=float32),
 array([-1.2570125 , 0.19796559], dtype=float32), array([-1.8487567, 2.2394893], dtype=float32),
 array([-1.427359, 1.629936], dtype=float32), array([-0.1620569 , 0.19238147], dtype=float32),
 array([-1.1115686, 1.2074753], dtype=float32), array([-1.7985297, 1.8849363], dtype=float32),
 array([-1.6629931, 2.1328294], dtype=float32), array([-1.6978788, 1.8067389], dtype=float32),
 array([-1.0565822, 0.9773378], dtype=float32), array([-0.89378214, 0.7869887 ], dtype=float32),
 ................
'''
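If the imtools helper is not available, a plain glob gives a similar list of image paths; the .jpg extension here is my assumption about the directory contents, not part of the original code:

# Sketch: build imlist without imtools (assumes ./data_3c contains .jpg files)
import glob
import numpy as np
imlist = sorted(glob.glob('./data_3c/*.jpg'))
# optionally persist the extracted features for later use
# np.save('features.npy', np.array(feature))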