Defining custom learnable variables in PyTorch and how to initialize them:

self.fuse_weight_1 = torch.nn.Parameter(torch.FloatTensor(1), requires_grad=True)
self.fuse_weight_1.data.fill_(0.25)

The two lines above define a learnable scalar. A learnable matrix can be defined the same way (note that torch.rand(3, 3) already returns a FloatTensor, so wrapping it in torch.FloatTensor is unnecessary):

self.fuse_weight_1 = torch.nn.Parameter(torch.rand(3, 3), requires_grad=True)
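Inside a module, such a parameter is used in forward like any other tensor; below is a minimal sketch, assuming the scalar weights a convex combination of two feature maps (the module name Fuse and the fusion formula are illustrative, not from the original):

import torch
import torch.nn as nn

class Fuse(nn.Module):
    # hypothetical module: fuses two feature maps with one learnable scalar
    def __init__(self):
        super(Fuse, self).__init__()
        self.fuse_weight_1 = torch.nn.Parameter(torch.FloatTensor(1), requires_grad=True)
        self.fuse_weight_1.data.fill_(0.25)

    def forward(self, feat_a, feat_b):
        # the scalar is trained by backprop like any other parameter
        return self.fuse_weight_1 * feat_a + (1 - self.fuse_weight_1) * feat_b

fuse = Fuse()
out = fuse(torch.ones(2, 3), torch.zeros(2, 3))
print(out)  # all elements are 0.25 at initialization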
How to initialize custom convolutional layers in PyTorch:
1.
import torch.nn as nn

class Net(nn.Module):

    def __init__(self, input_dim, output_dim, input_height, input_width):
        super(Net, self).__init__()
        # sizes taken as constructor arguments (undefined in the original snippet)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.input_height = input_height
        self.input_width = input_width

        self.conv = nn.Sequential(
            nn.Conv2d(self.input_dim, 64, 4, 2, 1),
            nn.ReLU(),
        )

        self.fc = nn.Sequential(
            nn.Linear(32, 64 * (self.input_height // 2) * (self.input_width // 2)),
            nn.BatchNorm1d(64 * (self.input_height // 2) * (self.input_width // 2)),
            nn.ReLU(),
        )

        self.deconv = nn.Sequential(
            nn.ConvTranspose2d(64, self.output_dim, 4, 2, 1),
            nn.Sigmoid(),
        )

        initialize_weights(self)  # module-level helper below (utils.initialize_weights in the original)

    def forward(self, input):
        ...

def initialize_weights(net):
    # walk every submodule and overwrite the default initialization
    for m in net.modules():
        if isinstance(m, nn.Conv2d):
            m.weight.data.normal_(0, 0.02)
            m.bias.data.zero_()
        elif isinstance(m, nn.ConvTranspose2d):
            m.weight.data.normal_(0, 0.02)
            m.bias.data.zero_()
        elif isinstance(m, nn.Linear):
            m.weight.data.normal_(0, 0.02)
            m.bias.data.zero_()
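A quick usage sketch (the constructor sizes are hypothetical), checking that the custom initialization actually ran:

net = Net(input_dim=3, output_dim=1, input_height=32, input_width=32)
print(net.conv[0].weight.std())      # close to the requested std of 0.02
print(net.conv[0].bias.abs().sum())  # bias was zero-initialized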
2.
import torch.nn as nn

def init_weights(m):
    # called once for every submodule by net.apply()
    print(m)
    if type(m) == nn.Linear:
        m.weight.data.fill_(1.0)
        print(m.weight)

net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
net.apply(init_weights)
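The constant fill above is just a demonstration; in practice the same net.apply pattern is usually combined with the helpers in torch.nn.init, which avoids touching .data directly (the choice of Xavier here is an illustrative assumption):

import torch.nn as nn

def init_weights(m):
    if isinstance(m, nn.Linear):
        nn.init.xavier_uniform_(m.weight)  # Glorot/Xavier uniform
        nn.init.zeros_(m.bias)

net = nn.Sequential(nn.Linear(2, 2), nn.Linear(2, 2))
net.apply(init_weights)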
3.
import math
import torch
import torch.nn as nn

def weights_init(m):
    classname = m.__class__.__name__
    if classname.find('Conv') != -1:  # matches Conv2d, ConvTranspose2d, ...
        # He initialization: std = sqrt(2 / n), n = kernel area * out_channels
        n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
        m.weight.data.normal_(0, math.sqrt(2. / n))
        if m.bias is not None:
            m.bias.data.zero_()
    elif classname.find('BatchNorm') != -1:
        m.weight.data.fill_(1)
        m.bias.data.zero_()
    elif classname.find('Linear') != -1:
        m.weight.data.normal_(0, 0.01)
        m.bias.data = torch.ones(m.bias.data.size())

net.apply(weights_init)
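Note that the manual Conv branch above, std = sqrt(2/n) with n = kernel height × kernel width × out_channels, is exactly He (Kaiming) initialization in fan_out mode, so for nn.Conv2d it can be replaced by the built-in helper; a minimal sketch (the function name weights_init_v2 is hypothetical):

import torch.nn as nn

def weights_init_v2(m):
    # equivalent to the manual Conv branch above for nn.Conv2d
    if isinstance(m, nn.Conv2d):
        nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
        if m.bias is not None:
            nn.init.zeros_(m.bias)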
4.
self.fuse_weight_1 = nn.Conv2d(1, 1, kernel_size=1, stride=1, bias=False)
self.fuse_weight_1.weight.data.fill_(0.2)
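Used outside a module, this 1×1 convolution acts as a single learnable elementwise scale on a one-channel map; a minimal check (the tensor shape is illustrative):

import torch
import torch.nn as nn

fuse = nn.Conv2d(1, 1, kernel_size=1, stride=1, bias=False)
fuse.weight.data.fill_(0.2)

x = torch.ones(1, 1, 4, 4)  # (batch, channels, height, width)
print(fuse(x))              # every element is 0.2 at initialization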