This article shows how to replace the Upsample operator with an equivalent ConvTranspose2d in PyTorch. The equivalence holds because nearest-neighbor 2x upsampling replicates each pixel into a 2x2 block, which is exactly what a stride-2 transposed convolution computes when each channel's own kernel is an all-ones 2x2 block and every cross-channel kernel is zero.

Background:
import torch
import torch.nn as nn
import torch.optim as optim
import numpy as np

class UpsampleModel(torch.nn.Module):
    def __init__(self):
        super(UpsampleModel, self).__init__()
        # Reference branch: nearest-neighbor 2x upsampling.
        self.up = nn.Upsample(scale_factor=2, mode='nearest')
        # Candidate branch: a 2x2, stride-2 transposed convolution whose
        # weights will be trained to match the reference branch.
        self.deconv1 = nn.ConvTranspose2d(3, 3, 2, 2, groups=1, bias=False)

    def forward(self, x):
        out0 = self.up(x)
        out1 = self.deconv1(x)
        return out0, out1
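Both branches map (1, 3, 224, 224) to (1, 3, 448, 448): the transposed convolution's output size is (H - 1) * stride + kernel = 223 * 2 + 2 = 448. A quick shape sanity check (my addition, not in the original post):

# Sanity check: the two branches produce tensors of the same shape.
m = UpsampleModel()
o0, o1 = m(torch.randn(1, 3, 224, 224))
print(o0.shape, o1.shape)  # torch.Size([1, 3, 448, 448]) for both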
def train():
    input_shape = (1, 3, 224, 224)
    model = UpsampleModel()
    criterion = nn.L1Loss()
    optimizer = optim.Adam(model.parameters(), lr=0.001)
    for epoch in range(2100):
        running_loss = 0.0
        for i in range(100):
            input_data = torch.randn(input_shape)
            optimizer.zero_grad()
            out0, out1 = model(input_data)
            # Train the deconv branch to reproduce the Upsample branch.
            loss = criterion(out0, out1)
            loss.backward()
            optimizer.step()
            running_loss += loss.item()
        avg_loss = running_loss / 100
        print('[%d] loss: %f' % (epoch + 1, avg_loss))
        if avg_loss < 1e-4:
            # The weights have converged to (numerically) 0/1; round for display.
            w = model.deconv1.weight.detach().numpy()
            print(np.round(w))
            break

train()
Result
[[[[ 1.  1.]
   [ 1.  1.]]

  [[-0. -0.]
   [-0. -0.]]

  [[ 0. -0.]
   [-0. -0.]]]


 [[[ 0.  0.]
   [ 0. -0.]]

  [[ 1.  1.]
   [ 1.  1.]]

  [[ 0.  0.]
   [ 0. -0.]]]


 [[[ 0. -0.]
   [-0. -0.]]

  [[ 0. -0.]
   [ 0.  0.]]

  [[ 1.  1.]
   [ 1.  1.]]]]
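Up to the sign of the rounded zeros, this is exactly the expected pattern: the (i, i) kernel slice is an all-ones 2x2 block and every cross-channel slice is zero. The same weight can also be built in closed form, with no training at all; the snippet below is my sketch of that construction (variable names are mine, not from the original):

# Closed-form kernel for ConvTranspose2d(3, 3, 2, 2): with groups=1 the weight
# shape is (in_channels, out_channels, 2, 2). Output channel i should copy
# input channel i, so only the diagonal (i, i) slices are non-zero.
eye = np.eye(3, dtype=np.float32)[:, :, None, None]    # (3, 3, 1, 1)
w = eye * np.ones((1, 1, 2, 2), dtype=np.float32)      # (3, 3, 2, 2)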
def val():
    # Rounded weights recovered by training: an identity mapping per channel.
    w = np.array(
        [[[[1., 1.], [1., 1.]],
          [[0., 0.], [0., 0.]],
          [[0., 0.], [0., 0.]]],
         [[[0., 0.], [0., 0.]],
          [[1., 1.], [1., 1.]],
          [[0., 0.], [0., 0.]]],
         [[[0., 0.], [0., 0.]],
          [[0., 0.], [0., 0.]],
          [[1., 1.], [1., 1.]]]])
    input_shape = (1, 3, 224, 224)
    model = UpsampleModel().eval()
    # Load the rounded weights into the deconv branch.
    model.deconv1.weight = torch.nn.Parameter(torch.from_numpy(w.astype(np.float32)))
    input_data = torch.randn(input_shape)
    out0, out1 = model(input_data)
    out0 = out0.detach().numpy().reshape(-1)
    out1 = out1.detach().numpy().reshape(-1)
    # The two branches must agree element-wise, bit for bit.
    ret = (out0 == out1).all()
    print(ret)

val()
Output
True
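A more compact variant is possible when the layer only needs to replicate channels: with groups equal to the channel count, each channel gets its own single 2x2 kernel, so an all-ones weight reproduces nearest-neighbor 2x upsampling without storing the cross-channel zero slices. This is my sketch, not part of the original post; nearest2x_deconv is a hypothetical helper name:

# Depthwise transposed convolution as nearest-neighbor 2x upsampling.
def nearest2x_deconv(channels):
    deconv = nn.ConvTranspose2d(channels, channels, kernel_size=2, stride=2,
                                groups=channels, bias=False)
    # With groups=channels the weight shape is (channels, 1, 2, 2):
    # one all-ones 2x2 block per channel.
    deconv.weight = nn.Parameter(torch.ones(channels, 1, 2, 2))
    return deconv

x = torch.randn(1, 3, 224, 224)
up = nn.Upsample(scale_factor=2, mode='nearest')
print(torch.equal(up(x), nearest2x_deconv(3)(x)))  # True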