当前位置:   article > 正文

[开源] CNN-BiLSTM-Attention 时间序列预测模型 Python 代码

python实现cnn多变量回归预测模型代码

整理了CNN-BiLSTM-Attention时间序列预测模型python代码分享给大家,记得点赞哦!

  1. #帅帅的笔者
  2. # coding: utf-8
  3. from keras.layers import Input, Dense, LSTM ,Conv1D,Dropout,Bidirectional,Multiply,Concatenate,BatchNormalization
  4. from keras.models import Model
  5. from sklearn.metrics import r2_score, mean_squared_error, mean_absolute_error
  6. from keras.layers.core import *
  7. from keras.models import *
  8. from keras.utils.vis_utils import plot_model
  9. from keras import optimizers
  10. import numpy
  11. import numpy as np
  12. import pandas as pd
  13. import math
  14. import datetime
  15. import matplotlib.pyplot as plt
  16. from pandas import read_csv
  17. from keras.models import Sequential
  18. from keras.layers import Dense
  19. from sklearn.preprocessing import MinMaxScaler
  20. from keras import backend as K
  21. def attention_function(inputs, single_attention_vector=False):
  22. TimeSteps = K.int_shape(inputs)[1]
  23. input_dim = K.int_shape(inputs)[2]
  24. a = Permute((2, 1))(inputs)
  25. a = Dense(TimeSteps, activation='softmax')(a)
  26. if single_attention_vector:
  27. a = Lambda(lambda x: K.mean(x, axis=1))(a)
  28. a = RepeatVector(input_dim)(a)
  29. a_probs = Permute((2, 1))(a)
  30. output_attention_mul = Multiply()([inputs, a_probs])
  31. return output_attention_mul
  32. def creat_dataset(dataset, look_back):
  33. dataX, dataY = [], []
  34. for i in range(len(dataset) - look_back - 1):
  35. a = dataset[i: (i + look_back)]
  36. dataX.append(a)
  37. dataY.append(dataset[i + look_back])
  38. return np.array(dataX), np.array(dataY)
  39. dataframe = pd.read_csv('天气.csv', header=0, parse_dates=[0], index_col=0, usecols=[0, 1])
  40. dataset = dataframe.values
  41. scaler = MinMaxScaler(feature_range=(0, 1))
  42. dataset = scaler.fit_transform(dataset.reshape(-1, 1))
  43. train_size = int(len(dataset) * 0.8)
  44. test_size = len(dataset) - train_size
  45. train, test = dataset[0: train_size], dataset[train_size: len(dataset)]
  46. look_back = 5
  47. trainX, trainY = creat_dataset(train, look_back)
  48. testX, testY = creat_dataset(test, look_back)
  49. def attention_model():
  50. inputs = Input(shape=(look_back, 1))
  51. x = Conv1D(filters = 128, kernel_size = 1, activation = 'relu')(inputs)
  52. BiLSTM_out = Bidirectional(LSTM(64, return_sequences=True,activation="relu"))(x)
  53. Batch_Normalization = BatchNormalization()(BiLSTM_out)
  54. Drop_out = Dropout(0.1)(Batch_Normalization)
  55. attention = attention_function(Drop_out)
  56. Batch_Normalization = BatchNormalization()(attention)
  57. Drop_out = Dropout(0.1)(Batch_Normalization)
  58. Flatten_ = Flatten()(Drop_out)
  59. output=Dropout(0.1)(Flatten_)
  60. output = Dense(1, activation='sigmoid')(output)
  61. model = Model(inputs=[inputs], outputs=output)
  62. return model
  63. model = attention_model()
  64. model.compile(loss='mean_squared_error', optimizer='adam')
  65. model.summary()
  66. history = model.fit(trainX, trainY, epochs=100, batch_size=64, verbose=0,validation_data=(testX, testY))
  67. trainPredict = model.predict(trainX)
  68. testPredict = model.predict(testX)
  69. trainPredict = scaler.inverse_transform(trainPredict)
  70. trainY = scaler.inverse_transform(trainY)
  71. testPredict = scaler.inverse_transform(testPredict)
  72. testY = scaler.inverse_transform(testY)
  73. testScore = math.sqrt(mean_squared_error(testY, testPredict[:, 0]))
  74. print('RMSE %.3f' %(testScore))
  75. testScore = mean_absolute_error(testY, testPredict[:, 0])
  76. print('MAE %.3f' %(testScore))
  77. testScore = r2_score(testY, testPredict[:, 0])
  78. print('R2 %.3f' %(testScore))
  79. trainPredictPlot = np.empty_like(dataset)
  80. trainPredictPlot[:] = np.nan
  81. trainPredictPlot = np.reshape(trainPredictPlot, (dataset.shape[0], 1))
  82. trainPredictPlot[look_back: len(trainPredict) + look_back, :] = trainPredict
  83. testPredictPlot = np.empty_like(dataset)
  84. testPredictPlot[:] = np.nan
  85. testPredictPlot = np.reshape(testPredictPlot, (dataset.shape[0], 1))
  86. testPredictPlot[len(trainPredict) + (look_back * 2) + 1: len(dataset) - 1, :] = testPredict
  87. plt.plot(history.history['loss'])
  88. plt.title('model loss')
  89. plt.ylabel('loss')
  90. plt.xlabel('epoch')
  91. plt.show()
  92. M = scaler.inverse_transform(dataset)
  93. N = scaler.inverse_transform(test)
  94. plt.figure(figsize=(10, 3),dpi=200)
  95. plt.plot(range(len(train),len(dataset)),N, label="Actual", linewidth=1)
  96. plt.plot(testPredictPlot, label='Prediction',linewidth=1,linestyle="--")
  97. plt.title('CNN-BiLSTM-attention Prediction', size=10)
  98. plt.ylabel('AQI',size=10)
  99. plt.xlabel('时间',size=10)
  100. plt.legend()
  101. plt.show()

更多55+时间序列预测python代码获取链接:时间序列预测算法全集合--深度学习

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/IT小白/article/detail/502214
推荐阅读
相关标签
  

闽ICP备14008679号