Predicting Time Series Data with an LSTM
from pandas import read_csv
from datetime import datetime
import pandas as pd
from pandas import DataFrame
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.metrics import mean_squared_error
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM
from keras.layers import SimpleRNN
from numpy import concatenate
from math import sqrt

# load data
def parse(x):
    return datetime.strptime(x, '%Y %m %d %H')

def read_raw():
    dataset = pd.read_csv('raw.csv', parse_dates=[['year', 'month', 'day', 'hour']],
                          index_col=0, date_parser=parse)
    dataset.drop('No', axis=1, inplace=True)
    # manually specify column names
    dataset.columns = ['pollution', 'dew', 'temp', 'press', 'wnd_dir', 'wnd_spd', 'snow', 'rain']
    dataset.index.name = 'date'
    # mark all NA values with 0
    dataset['pollution'].fillna(0, inplace=True)
    # drop the first 24 hours
    dataset = dataset[24:]
    # summarize first 5 rows
    print(dataset.head(5))
    # save to file
    dataset.to_csv('pollution.csv')

# convert series to supervised learning
def series_to_supervised(data, n_in=1, n_out=1, dropnan=True):
    n_vars = 1 if type(data) is list else data.shape[1]
    df = DataFrame(data)
    cols, names = list(), list()
    # input sequence (t-n, ... t-1)
    for i in range(n_in, 0, -1):
        cols.append(df.shift(i))
        names += [('var%d(t-%d)' % (j + 1, i)) for j in range(n_vars)]
    # forecast sequence (t, t+1, ... t+n)
    for i in range(0, n_out):
        cols.append(df.shift(-i))
        if i == 0:
            names += [('var%d(t)' % (j + 1)) for j in range(n_vars)]
        else:
            names += [('var%d(t+%d)' % (j + 1, i)) for j in range(n_vars)]
    # put it all together
    agg = pd.concat(cols, axis=1)
    agg.columns = names
    # drop rows with NaN values
    if dropnan:
        agg.dropna(inplace=True)
    return agg

# load dataset
dataset = read_csv('pollution.csv', header=0, index_col=0)
values = dataset.values
# integer encode wind direction
encoder = LabelEncoder()
print(values[:, 4])
values[:, 4] = encoder.fit_transform(values[:, 4])
# ensure all data is float
values = values.astype('float32')
# normalize features
scaler = MinMaxScaler(feature_range=(0, 1))
scaled = scaler.fit_transform(values)
# frame as supervised learning
reframed = series_to_supervised(scaled, 1, 1)
#reframed = series_to_supervised(scaled, 3, 1)  # use the previous 3 days' data to predict the current day
print("columns:", reframed.columns)
# drop columns we don't want to predict
reframed.drop(reframed.columns[[9, 10, 11, 12, 13, 14, 15]], axis=1, inplace=True)   # use the previous day's data to predict the current day
#reframed.drop(reframed.columns[[25, 26, 27, 28, 29, 30, 31]], axis=1, inplace=True)  # use the previous 3 days' data to predict the current day
print(reframed.head())
print("new columns:", reframed.columns)

# split into train and test sets
values = reframed.values
n_train_hours = 365 * 24
train = values[:n_train_hours, :]
test = values[n_train_hours:, :]
# split into input and outputs
train_X, train_y = train[:, :-1], train[:, -1]
test_X, test_y = test[:, :-1], test[:, -1]
# reshape input to be 3D [samples, timesteps, features]
# (no reshape is needed when using the Dense() model)
train_X = train_X.reshape((train_X.shape[0], 1, train_X.shape[1]))
test_X = test_X.reshape((test_X.shape[0], 1, test_X.shape[1]))
print(train_X.shape, train_y.shape, test_X.shape, test_y.shape)

# design network
model = Sequential()
#model.add(LSTM(50, input_shape=(train_X.shape[1], train_X.shape[2])))
#model.add(Dense(50, activation='relu', input_dim=8))
model.add(SimpleRNN(50, input_shape=(train_X.shape[1], train_X.shape[2])))
model.add(Dense(1))
model.compile(loss='mae', optimizer='adam')
# fit network
history = model.fit(train_X, train_y, epochs=50, batch_size=72,
                    validation_data=(test_X, test_y), verbose=2, shuffle=False)
# make a prediction
yhat = model.predict(test_X)
print("yhat shape:", yhat.shape)
'''
# compute the RMSE on the test set
test_X = test_X.reshape((test_X.shape[0], test_X.shape[2]))
print("test_X shape:", test_X.shape)
# invert scaling for forecast
inv_yhat = concatenate((yhat, test_X[:, 1:]), axis=1)
inv_yhat = scaler.inverse_transform(inv_yhat)
inv_yhat = inv_yhat[:, 0]
# invert scaling for actual
test_y = test_y.reshape((len(test_y), 1))
inv_y = concatenate((test_y, test_X[:, 1:]), axis=1)
print("inv_y:", inv_y[:10])
print("inv_y shape:", inv_y.shape)
inv_y = scaler.inverse_transform(inv_y)
print(inv_y, "*" * 30)
inv_y = inv_y[:, 0]
# calculate RMSE
rmse = sqrt(mean_squared_error(inv_y, inv_yhat))
print(inv_y[:100], inv_yhat[:100])
print('Test RMSE: %.3f' % rmse)
'''
Experiment 1: predict a day's air quality from the previous day's weather data, using the LSTM model.
Result:
Epoch 49/50 0s - loss: 0.0144 - val_loss: 0.0133
Epoch 50/50 0s - loss: 0.0144 - val_loss: 0.0133

Experiment 2: predict a day's air quality from the previous 3 days' weather data, using the LSTM model.
Result:
Epoch 49/50 0s - loss: 0.0147 - val_loss: 0.0149
Epoch 50/50 0s - loss: 0.0147 - val_loss: 0.0150

Experiment 3: predict a day's air quality from the previous day's weather data, using a plain fully connected model, Dense().
Result:
Epoch 49/50 0s - loss: 0.0144 - val_loss: 0.0146
Epoch 50/50 0s - loss: 0.0148 - val_loss: 0.0151

Experiment 4: predict a day's air quality from the previous 3 days' weather data, using a plain fully connected model, Dense().
Result:
Epoch 49/50 0s - loss: 0.0150 - val_loss: 0.0165
Epoch 50/50 0s - loss: 0.0148 - val_loss: 0.0141

Experiment 5: predict a day's air quality from the previous day's weather data, using SimpleRNN.
Result:
Epoch 49/50 0s - loss: 0.0160 - val_loss: 0.0140
Epoch 50/50 0s - loss: 0.0147 - val_loss: 0.0150

Experiment 6: predict a day's air quality from the previous 3 days' weather data, using SimpleRNN.
Result:
Epoch 49/50 0s - loss: 0.0164 - val_loss: 0.0233
Epoch 50/50 0s - loss: 0.0166 - val_loss: 0.0227
The "recurrent" in RNN refers to the fact that a sequence's current output also depends on the outputs that came before it. In other words, the network memorizes earlier information and applies it when computing the current output: the hidden-layer nodes are no longer unconnected but are connected to one another, and the hidden layer's input includes not only the output of the input layer but also the hidden layer's own output from the previous time step.
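A minimal NumPy sketch of that recurrence, equivalent in spirit to the SimpleRNN(50) layer above (the weight names W_xh, W_hh, b_h and the random initialization are illustrative, not taken from Keras internals):

import numpy as np

def simple_rnn_forward(xs, n_hidden=50, seed=0):
    """Unroll a vanilla RNN over a sequence xs of shape (timesteps, n_features)."""
    rng = np.random.default_rng(seed)
    n_features = xs.shape[1]
    W_xh = rng.normal(scale=0.1, size=(n_features, n_hidden))  # input -> hidden
    W_hh = rng.normal(scale=0.1, size=(n_hidden, n_hidden))    # hidden -> hidden: the recurrence
    b_h = np.zeros(n_hidden)
    h = np.zeros(n_hidden)  # initial hidden state
    for x_t in xs:
        # the hidden layer sees both the current input and its own previous output
        h = np.tanh(x_t @ W_xh + h @ W_hh + b_h)
    return h  # final hidden state

h = simple_rnn_forward(np.random.rand(3, 8))  # e.g. 3 timesteps, 8 features

The single line h = tanh(x_t @ W_xh + h @ W_hh + b_h) is the whole "memory" mechanism of a plain RNN: every time step overwrites h with a fresh nonlinear blend of input and previous state.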
Internally, the LSTM controls what gets passed along through gated states: it remembers what needs to be remembered over long spans and forgets unimportant information, rather than having only the single, "naive" memory-accumulation mechanism of a plain RNN. This makes it particularly effective for tasks that require long-term memory.
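To make the gating concrete, here is a sketch of one LSTM time step using the standard gate equations (the parameter layout, with all four gates stacked into W, U, b, is my own convention for brevity, not the Keras internal one):

import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

def lstm_step(x_t, h_prev, c_prev, W, U, b):
    """One LSTM step. W: (n_features, 4n), U: (n, 4n), b: (4n,) hold 4 stacked gates."""
    n = h_prev.shape[0]
    z = x_t @ W + h_prev @ U + b      # all four gate pre-activations at once
    f = sigmoid(z[0*n:1*n])           # forget gate: what to erase from the cell
    i = sigmoid(z[1*n:2*n])           # input gate: what new information to store
    o = sigmoid(z[2*n:3*n])           # output gate: what to expose as the hidden state
    g = np.tanh(z[3*n:4*n])           # candidate cell update
    c = f * c_prev + i * g            # gated memory: keep the important, drop the rest
    h = o * np.tanh(c)
    return h, c

Unlike the plain RNN step, the cell state c is updated additively and under explicit gate control, which is what lets information survive over long spans instead of being overwritten at every step.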
But all of that extra machinery also means many more parameters, which makes training considerably harder. So in practice, when building models that require a lot of training, we often use the GRU instead, which performs about as well as the LSTM but has fewer parameters.
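For reference, swapping the GRU into the model above is a one-line change; a small sketch (layer sizes match the experiments, and the exact parameter counts vary slightly by Keras version, so treat the printed numbers as indicative):

from keras.models import Sequential
from keras.layers import Dense, LSTM, GRU

def build(cell, timesteps, n_features):
    model = Sequential()
    model.add(cell(50, input_shape=(timesteps, n_features)))
    model.add(Dense(1))
    model.compile(loss='mae', optimizer='adam')
    return model

# with 1 timestep and 8 features, as in the experiments above:
lstm_model = build(LSTM, 1, 8)
gru_model = build(GRU, 1, 8)
print(lstm_model.count_params(), gru_model.count_params())  # GRU is roughly 25% smaller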