当前位置:   article > 正文

Pytorch下查看各层名字及根据layers的name冻结层进行finetune训练

model = Net().cuda(); for name, param in model.named_parameters(): print(name)
  1. from model_ori1 import resnet34 # model_ori1:your model
  2. import torch
  3. import torch.nn as nn
  4. class Net(nn.Module):
  5. def __init__(self):
  6. super(Net, self).__init__()
  7. model = resnet34()
  8. self.resnet = model
  9. def forward(self, img):
  10. out = self.resnet(img)
  11. # print('out is {}'.format(out))
  12. return out
  13. model = Net().cuda()
  14. for name, param in model.named_parameters(): # 查看可优化的参数有哪些
  15. if param.requires_grad:
  16. print(name)
  1. # 冻结某个参数预训练
  2. from ResNeSt.resnest.torch.resnest import resnest101,resnest200,resnest269
  3. class Net(nn.Module):
  4. def __init__(self):
  5. super(Net, self).__init__()
  6. model = resnest269(pretrained=True)
  7. model.fc = nn.Linear(2048,102)
  8. self.resnet = model
  9. def forward(self, img):
  10. out = self.resnet(img)
  11. # print('out is {}'.format(out))
  12. return out
  13. model = Net().cuda()
  14. for name, param in model.named_parameters(): # 查看可优化的参数有哪些
  15. if param.requires_grad:
  16. print(name)
  1. 新model == net:
  2. 读取旧pt,将其layer,name参数给新model中的
  3. model = resnet34()
  4. # inchannel = model.fc.in_features
  5. # print("fad",inchannel)
  6. # model.fc = nn.Linear(inchannel, 5)
  7. # m1,m = model.load_state_dict(torch.load("./rep2.pt"),strict= False)

 

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/小丑西瓜9/article/detail/114188
推荐阅读
相关标签
  

闽ICP备14008679号