Main code. It implements a simple encoder-decoder model and saves the model parameters as numpy arrays.
# coding=utf-8
import configparser
from torch.utils.data import Dataset, DataLoader
import torch
import torch.nn.functional as F
import numpy as np
import itertools


class SelfDataset(Dataset):
    def __init__(self, filename, norm=True):
        self.filename = filename
        self.norm = norm
        self.data = self.read_data()

    def read_data(self):
        data = []
        with open(self.filename, 'r', encoding='utf-8') as f:
            f.readline()  # skip the header row
            for line in f.readlines():
                digits = [float(x) for x in line.strip().split(',')]
                data.append(digits)
        data = np.array(data)
        if self.norm:
            # min-max normalize each column to [0, 1]
            data = (data - np.min(data, axis=0)) / (np.max(data, axis=0) - np.min(data, axis=0))
        return data

    def __getitem__(self, index):
        return self.data[index]

    def __len__(self):
        return len(self.data)


class Model(torch.nn.Module):
    def __init__(self, feature_in, hidden_units, bias=False, weight_path=None, bias_path=None):
        super(Model, self).__init__()
        self.bias = bias
        self.weight_path = weight_path
        self.bias_path = bias_path
        self.input_layer = torch.nn.Linear(feature_in, hidden_units[0], bias=bias)
        self.encoder = torch.nn.Sequential(
            *[torch.nn.Linear(unit_in, unit_out, bias=bias)
              for unit_in, unit_out in zip(hidden_units[:-1], hidden_units[1:])])
        # The decoder mirrors the encoder: for hidden_units = [4, 2] it maps 2 -> 4.
        # (The original slices hidden_units[1::-1] / hidden_units[:-1:-1] produced an
        # empty decoder, because [:-1:-1] is always the empty slice.)
        reversed_units = hidden_units[::-1]
        self.decoder = torch.nn.Sequential(
            *[torch.nn.Linear(unit_in, unit_out, bias=bias)
              for unit_in, unit_out in zip(reversed_units[:-1], reversed_units[1:])])
        # The decoder ends at hidden_units[0], so the output layer maps back to feature_in.
        self.output_layer = torch.nn.Linear(hidden_units[0], feature_in, bias=bias)

    def forward(self, x):
        x = self.input_layer(x)
        x = F.relu(x)
        x = self.encoder(x)
        x = self.decoder(x)
        return self.output_layer(x)

    def save_weights_as_numpy(self):
        # Save only the encoding side (input_layer plus the encoder layers).
        weights = []
        bias = []
        with torch.no_grad():
            for name, module in itertools.chain(zip(["input_layer"], [self.input_layer]),
                                                self.encoder.named_children()):
                weights.append(module.weight.cpu().numpy())
                if self.bias:
                    bias.append(module.bias.cpu().numpy())
        if self.weight_path is not None:
            # The layers have different shapes, so store them as an object array;
            # reload with np.load(..., allow_pickle=True).
            np.save(self.weight_path, np.array(weights, dtype=object))
        if self.bias and self.bias_path is not None:
            np.save(self.bias_path, np.array(bias, dtype=object))


def train(model, device, train_loader, criterion, optimizer, epochs=10):
    model.train()
    for epoch in range(epochs):
        for batch_idx, x in enumerate(train_loader):
            x = x.to(torch.float32).to(device)
            optimizer.zero_grad()
            output = model(x)
            loss = criterion(output, x)  # reconstruction loss against the input
            loss.backward()
            optimizer.step()
            if batch_idx % 100 == 0:
                print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                    epoch, batch_idx * len(x), len(train_loader.dataset),
                    100. * batch_idx / len(train_loader), loss.item()))


if __name__ == '__main__':
    parser = configparser.ConfigParser()
    parser.read('config.ini', encoding='utf-8')
    feature_in = parser.getint('net', 'feature_in')
    hidden_units = [int(x) for x in parser.get('net', 'hidden_layers_units').split(',')]
    biased = parser.getboolean('net', 'bias')
    epochs = parser.getint('train', 'epochs')
    batch_size = parser.getint('train', 'batch_size')
    learning_rate = parser.getfloat('train', 'learning_rate')
    weight_path = parser.get('path', 'weight_path')
    data_path = parser.get('path', 'data_path')

    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    data_set = SelfDataset(data_path, True)
    data_loader = DataLoader(data_set, batch_size=batch_size, shuffle=True)
    model = Model(feature_in, hidden_units, biased, weight_path=weight_path).to(device)
    criterion = torch.nn.MSELoss()
    optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate)
    train(model, device, data_loader, criterion, optimizer, epochs)
    model.save_weights_as_numpy()
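Once training finishes, the saved arrays can reproduce the encoding step without PyTorch at all. The following is a minimal sketch, assuming weight.npy was written by save_weights_as_numpy above with bias = False; the helper names relu and encode are illustrative, not part of the original code.

import numpy as np

def relu(x):
    return np.maximum(x, 0.0)

# Object array of per-layer weights: [input_layer, encoder layer 1, ...]
weights = np.load('weight.npy', allow_pickle=True)

def encode(x):
    # nn.Linear stores weights as (out_features, in_features), so apply W @ x.
    h = relu(weights[0] @ x)   # input_layer + ReLU, mirroring Model.forward
    for w in weights[1:]:      # encoder layers (no activation, as in the model)
        h = w @ h
    return h

sample = np.random.rand(9)     # one normalized 9-dimensional feature vector
print(encode(sample))          # 2-dimensional code for hidden_layers_units = 4,2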
Configuration file
; file save paths
[path]
data_path = C:/Users/xia/PycharmProjects/pytorch_demo/encoder/data/feature.csv
weight_path = weight.npy
bias_path = bias.npy

; Network settings: whether to use bias terms, the input vector size, and the
; number of units in each encoder/decoder layer. For example, the values below
; yield two weight matrices, mapping 9 -> 4 and 4 -> 2.
[net]
bias = False
feature_in = 9
hidden_layers_units = 4,2

; training parameters
[train]
batch_size = 32
epochs = 100
learning_rate = 0.01
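For reference, SelfDataset.read_data skips the first line of data_path as a header and parses every remaining line as comma-separated floats, so with feature_in = 9 the file is expected to hold nine columns per row. The column names and values below are made up purely for illustration:

f1,f2,f3,f4,f5,f6,f7,f8,f9
0.12,3.4,5.0,0.7,1.1,2.2,0.0,9.8,4.3
0.55,2.1,4.8,0.9,1.0,2.5,0.3,8.7,4.1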