
Optimizing a Transformer Time-Series Forecasting Model with nn.Transformer (Part 2)

This post uses nn.Transformer and adds time-feature encoding, positional encoding, and a token embedding to the model. The forecasts are noticeably better than in the previous post (Part 1). The dataset is again the ETT dataset; the full code follows.

1. Import the required packages

import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import plotly.express as px
from sklearn import metrics
from torch.utils.data import Dataset, DataLoader

2. Define the time-feature encodings

# hour of day / day of week / day of month / day of year, each scaled to [-0.5, 0.5]
def HourOfDay(date):
    """Hour of day encoded as a value in [-0.5, 0.5]"""
    return date.hour / 23.0 - 0.5

def DayOfWeek(date):
    """Day of week encoded as a value in [-0.5, 0.5]"""
    return date.dayofweek / 6.0 - 0.5

def DayOfMonth(date):
    """Day of month encoded as a value in [-0.5, 0.5]"""
    return (date.day - 1) / 30.0 - 0.5

def DayOfYear(date):
    """Day of year encoded as a value in [-0.5, 0.5]"""
    return (date.dayofyear - 1) / 365.0 - 0.5
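As a quick sanity check of these encodings (a minimal sketch; pd.Timestamp is what the functions receive via the DatetimeIndex built in section 6):

ts = pd.Timestamp('2016-07-01 12:00:00')
print(HourOfDay(ts))   # hour 12 -> 12 / 23 - 0.5 ≈ 0.0217
print(DayOfWeek(ts))   # Friday -> 4 / 6 - 0.5 ≈ 0.1667
print(DayOfMonth(ts))  # day 1 -> (1 - 1) / 30 - 0.5 = -0.5
print(DayOfYear(ts))   # day 183 of a leap year -> 182 / 365 - 0.5 ≈ -0.0014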

3. Define the positional encoding

class PositionalEncoding(nn.Module):
    def __init__(self, d_model, device, max_len=5000):
        super(PositionalEncoding, self).__init__()
        position = torch.arange(0, max_len).unsqueeze(1)
        div_term = torch.exp(torch.arange(0, d_model, 2) * -(np.log(10000.0) / d_model))
        pe = torch.zeros(max_len, d_model)
        pe[:, 0::2] = torch.sin(position * div_term)  # even dimensions: sine
        pe[:, 1::2] = torch.cos(position * div_term)  # odd dimensions: cosine
        # shape (max_len, 1, d_model): sequence-first, broadcasts over the batch dimension
        self.pe = pe.unsqueeze(0).transpose(1, 0).to(device)

    def forward(self, x):
        # return the precomputed encodings for the first x.size(0) time steps
        return self.pe[:x.size(0), :]
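For reference, these are the standard sinusoidal encodings from "Attention Is All You Need":

PE_{(pos, 2i)} = \sin\left(pos / 10000^{2i/d_{model}}\right), \qquad PE_{(pos, 2i+1)} = \cos\left(pos / 10000^{2i/d_{model}}\right)

Because they depend only on the position index, they can be precomputed once and sliced in forward, which is exactly what the class above does.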

4. Define the token embedding

class TokenEmbedding(nn.Module):
    def __init__(self, input_size, d_model, kernel_size, padding):
        super(TokenEmbedding, self).__init__()
        # a 1-D convolution projects each time step's features into d_model channels;
        # padding=1 with kernel_size=3 keeps the sequence length unchanged
        self.tokenConv = nn.Conv1d(input_size, d_model, kernel_size,
                                   stride=1, padding=padding,
                                   padding_mode='circular')
        for m in self.modules():
            if isinstance(m, nn.Conv1d):
                nn.init.kaiming_normal_(m.weight, mode='fan_in',
                                        nonlinearity='leaky_relu')

    def forward(self, x):
        # (seq_len, batch, features) -> (batch, features, seq_len) for Conv1d,
        # then back to (seq_len, batch, d_model)
        x = self.tokenConv(x.permute(1, 2, 0)).permute(2, 0, 1)
        return x
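A quick shape check for the embedding (a sketch; the dimensions match the hyperparameters used in section 10):

emb = TokenEmbedding(input_size=1, d_model=128, kernel_size=3, padding=1)
x = torch.randn(96, 32, 1)  # (seq_len, batch, input_size)
print(emb(x).shape)         # torch.Size([96, 32, 128])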

5. Define the optimized Transformer network

class TransformerTimeSeriesModel(nn.Module):
    def __init__(self, input_size, d_model, device, pred_length, nhead,
                 num_encoder_layers, num_decoder_layers, dim_feedforward,
                 kernel_size, padding, output_size, time_stamp_length, dropout=0.1):
        super(TransformerTimeSeriesModel, self).__init__()
        self.pred_length = pred_length
        self.device = device
        self.value_encoding = TokenEmbedding(input_size, d_model, kernel_size, padding)
        self.positional_encoding = PositionalEncoding(d_model, device)
        self.timefeature_encoding = nn.Linear(time_stamp_length, d_model)
        self.transformer = nn.Transformer(d_model=d_model, nhead=nhead,
                                          num_encoder_layers=num_encoder_layers,
                                          num_decoder_layers=num_decoder_layers,
                                          dim_feedforward=dim_feedforward,
                                          dropout=dropout)
        self.fc_out = nn.Linear(d_model, output_size)

    def forward(self, src, src_date, tgt, tgt_date):
        # causal mask so each decoder position only attends to earlier positions
        tgt_mask = nn.Transformer.generate_square_subsequent_mask(tgt.size(0)).to(self.device)
        # input embedding = token embedding + positional encoding + time-feature encoding
        src = self.value_encoding(src) + self.positional_encoding(src) + self.timefeature_encoding(src_date)
        tgt = self.value_encoding(tgt) + self.positional_encoding(tgt) + self.timefeature_encoding(tgt_date)
        output = self.transformer(src, tgt, tgt_mask=tgt_mask)
        # keep only the last pred_length steps as the forecast
        return self.fc_out(output)[-self.pred_length:, :, :]
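To confirm the end-to-end tensor shapes (a minimal sketch on CPU with random inputs; the sizes mirror the main script in section 10):

device = 'cpu'
model = TransformerTimeSeriesModel(input_size=1, d_model=128, device=device,
                                   pred_length=24, nhead=2,
                                   num_encoder_layers=2, num_decoder_layers=2,
                                   dim_feedforward=128, kernel_size=3, padding=1,
                                   output_size=1, time_stamp_length=4)
src = torch.randn(96, 32, 1)       # (seq_length, batch, input_size)
src_date = torch.randn(96, 32, 4)  # (seq_length, batch, time_stamp_length)
tgt = torch.randn(72, 32, 1)       # label_length + pred_length = 48 + 24
tgt_date = torch.randn(72, 32, 4)
print(model(src, src_date, tgt, tgt_date).shape)  # torch.Size([24, 32, 1])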

6. Define the Dataset and data preparation

# Dataset: builds sliding windows of values and their time features
class get_dataset(Dataset):
    def __init__(self, data_path, seq_length, label_length, pred_length,
                 time_stamp_length, features, train_split, mode):
        self.mode = mode
        self.data_path = data_path
        self.features = features
        self.seq_length = seq_length
        self.label_length = label_length
        self.pred_length = pred_length
        self.time_stamp_length = time_stamp_length
        self.data, self.date_stamp, self.data_max, self.data_min = self.get_data()
        train_num = int(train_split * len(self.data))
        if self.mode == 'train':
            self.data = self.data[:train_num, :, :]
            self.date_stamp = self.date_stamp[:train_num, :, :]
        else:
            self.data = self.data[train_num:, :, :]
            self.date_stamp = self.date_stamp[train_num:, :, :]

    def __len__(self):
        return len(self.data)

    def __getitem__(self, index):
        # encoder input, decoder input (label_length known steps + pred_length),
        # their time stamps, and the prediction target
        return self.data[index, :-self.pred_length, :], \
               self.data[index, (self.seq_length - self.label_length):, :], \
               self.date_stamp[index, :-self.pred_length, :], \
               self.date_stamp[index, (self.seq_length - self.label_length):, :], \
               self.data[index, self.seq_length:, -1].unsqueeze(1)

    def get_data(self):
        data = pd.read_csv(self.data_path)
        data.index = pd.to_datetime(data['date'])
        data = data.drop('date', axis=1)
        # min-max normalization to [0, 1]
        data_max = data.max()
        data_min = data.min()
        data = (data - data_min) / (data_max - data_min)
        num_sample = len(data) - self.seq_length - self.pred_length + 1
        seq_data = torch.zeros(num_sample,
                               self.seq_length + self.pred_length,
                               len(self.features))
        date_stamp = torch.zeros(num_sample,
                                 self.seq_length + self.pred_length,
                                 self.time_stamp_length)
        for i in range(num_sample):
            seq_data[i] = torch.tensor(data.iloc[i:i + self.seq_length + self.pred_length,
                                                 self.features].values)
            time_seq = data.index[i:i + self.seq_length + self.pred_length]
            hourofday = torch.tensor(list(map(lambda date: HourOfDay(date), time_seq)))
            dayofweek = torch.tensor(list(map(lambda date: DayOfWeek(date), time_seq)))
            dayofmonth = torch.tensor(list(map(lambda date: DayOfMonth(date), time_seq)))
            dayofyear = torch.tensor(list(map(lambda date: DayOfYear(date), time_seq)))
            date_stamp[i] = torch.stack([hourofday, dayofweek, dayofmonth, dayofyear]).transpose(1, 0)
        return seq_data, date_stamp, data_max, data_min
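To make the window layout concrete: each sample spans seq_length + pred_length = 120 steps; the encoder sees steps 0-95, the decoder input covers steps 48-119, and the target is steps 96-119. A sketch ('ETTh1.csv' is an assumed file name for the ETT data, not stated in the post):

ds = get_dataset('ETTh1.csv', seq_length=96, label_length=48, pred_length=24,
                 time_stamp_length=4, features=[6], train_split=0.8, mode='train')
src, tgt, src_date, tgt_date, label = ds[0]
print(src.shape)    # torch.Size([96, 1])  encoder input: steps 0..95
print(tgt.shape)    # torch.Size([72, 1])  decoder input: steps 48..119
print(label.shape)  # torch.Size([24, 1])  target: steps 96..119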

7. Define the training loop

def train(model, dataset, epochs, optim, loss_function, device, batch_size, shuffle=True):
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
    for epoch in range(epochs):
        train_loss = 0
        model.train()
        for x, y, x_date, y_date, label in data_loader:
            # DataLoader yields (batch, seq, feature); the transformer expects (seq, batch, feature)
            x, y, label = x.transpose(1, 0).to(device), y.transpose(1, 0).to(device), label.transpose(1, 0).to(device)
            x_date = x_date.transpose(1, 0).to(device)
            y_date = y_date.transpose(1, 0).to(device)
            # note: y contains the ground-truth future window (teacher forcing)
            pred = model(x, x_date, y, y_date)
            loss = loss_function(pred, label)
            optim.zero_grad()
            loss.backward()
            optim.step()
            train_loss += loss.item()
        train_loss /= len(data_loader)
        print('epoch / epochs : %d / %d, loss : %.6f' % (epoch + 1, epochs, train_loss))

8. Define the test loop

def test(model, dataset, device, batch_size, label_length, pred_length, root_path, shuffle=False):
    model.eval()
    data_loader = DataLoader(dataset, batch_size=batch_size, shuffle=shuffle)
    preds, labels = np.zeros(len(dataset) * pred_length), \
                    np.zeros(len(dataset) * pred_length)
    left, right = 0, 0
    with torch.no_grad():
        for x, y, x_date, y_date, label in data_loader:
            # [left, right) marks this batch's slice of the flattened prediction array
            left = right
            right += len(label) * pred_length
            x, y = x.transpose(1, 0).to(device), y.transpose(1, 0).to(device)
            x_date = x_date.transpose(1, 0).to(device)
            y_date = y_date.transpose(1, 0).to(device)
            pred = model(x, x_date, y, y_date).detach().cpu().numpy().flatten()
            preds[left:right] = pred
            labels[left:right] = label.transpose(1, 0).detach().cpu().numpy().flatten()
    # undo the min-max normalization for the target column 'OT'
    preds_ = preds * (dataset.data_max['OT'] - dataset.data_min['OT']) + dataset.data_min['OT']
    labels_ = labels * (dataset.data_max['OT'] - dataset.data_min['OT']) + dataset.data_min['OT']
    np.save(root_path + '_preds.npy', preds)
    np.save(root_path + '_labels.npy', labels)
    return preds_, labels_

9. Define the evaluation metrics

def get_metric(pred, label):
    # exclude near-zero labels to keep MAPE finite
    index = np.where(label > 0.01)
    mse = np.mean((label - pred) ** 2)
    r2 = 1 - np.sum((label - pred) ** 2) / np.sum((label - np.mean(label)) ** 2)
    mape = np.abs((pred[index] - label[index]) / label[index]).mean()
    mae = np.abs(label - pred).mean()
    return mse, r2, mape, mae

def model_eva(pred, label):
    fig = px.line(title='Transformer model predictions')
    fig.add_scatter(y=label, name='label')
    fig.add_scatter(y=pred, name='pred')
    fig.show()
    mse, r2, mape, mae = get_metric(pred, label)
    print('MSE : %.6f' % mse)
    print('R2 : %.6f' % r2)
    print('MAPE : %.6f' % mape)
    print('MAE : %.6f' % mae)
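For reference, the metrics computed above are

\mathrm{MSE} = \frac{1}{n}\sum_i (y_i - \hat{y}_i)^2, \qquad
R^2 = 1 - \frac{\sum_i (y_i - \hat{y}_i)^2}{\sum_i (y_i - \bar{y})^2}, \qquad
\mathrm{MAPE} = \frac{1}{n'}\sum_{i:\, y_i > 0.01} \left| \frac{\hat{y}_i - y_i}{y_i} \right|, \qquad
\mathrm{MAE} = \frac{1}{n}\sum_i |y_i - \hat{y}_i|

where the MAPE sum runs only over the n' points whose label exceeds 0.01 (the np.where filter above), which keeps the division stable.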

10. Main function and results

# reproducibility
seed = 0
torch.manual_seed(seed)
if torch.cuda.is_available():
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
np.random.seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
device = 'cuda' if torch.cuda.is_available() else 'cpu'

# window sizes
seq_length = 96
label_length = 48
pred_length = 24
features = [6]  # column indices in [HUFL, HULL, MUFL, MULL, LUFL, LULL, OT]; 6 = OT
input_size = len(features)
output_size = 1

# training hyperparameters
epochs = 100
lr = 0.005
batch_size = 32
train_split = 0.8

# model hyperparameters
d_model = 128
nhead = 2
num_encoder_layers = 2
num_decoder_layers = 2
dim_feedforward = 128
dropout = 0.1
kernel_size = 3
padding = 1
time_stamp_length = 4

data_path = './ETTh1.csv'  # path to the ETT data file (not given in the original post; adjust to your copy)
root_path = './' + 'seq_' + str(seq_length) + '_label_' + str(label_length) + '_pred_' + str(pred_length)
save_path = root_path + '_transformer.pth'

model = TransformerTimeSeriesModel(input_size, d_model, device, pred_length,
                                   nhead, num_encoder_layers, num_decoder_layers,
                                   dim_feedforward, kernel_size, padding, output_size,
                                   time_stamp_length, dropout=dropout).to(device)
optim = torch.optim.SGD(model.parameters(), lr=lr)
loss_function = nn.MSELoss()

dataset_train = get_dataset(data_path, seq_length, label_length, pred_length,
                            time_stamp_length, features, train_split=train_split, mode='train')
dataset_test = get_dataset(data_path, seq_length, label_length, pred_length,
                           time_stamp_length, features, train_split=train_split, mode='test')

train(model, dataset_train, epochs, optim, loss_function, device, batch_size, shuffle=True)
torch.save(model.state_dict(), save_path)
preds, labels = test(model, dataset_test, device, batch_size, label_length, pred_length, root_path, shuffle=False)
model_eva(preds, labels)
