PyTorch nn.LSTM 用法示例 (PyTorch nn.LSTM usage example)
# PyTorch LSTM usage example.
# nn.LSTM(input_size=10, hidden_size=20, num_layers=2):
#   10: feature dimension of each input element
#   20: feature dimension of the hidden state
#   2:  number of stacked LSTM layers
import torch
from torch import nn

lstm = nn.LSTM(10, 20, 2)

# Input shape (seq_len=5, batch=3, input_size=10):
# e.g. each run takes 3 sentences of 5 tokens each, every token
# embedded in 10 dimensions.
# NOTE: the deprecated Variable wrapper is no longer needed — plain
# tensors carry autograd state since PyTorch 0.4.
seq_input = torch.randn(5, 3, 10)

# Initial hidden and cell states, shape (num_layers=2, batch=3, hidden_size=20).
h0 = torch.randn(2, 3, 20)
c0 = torch.randn(2, 3, 20)

# output: hidden states of the LAST layer at every time step -> (5, 3, 20);
# it depends on the sequence length, not on the number of layers.
# (hn, cn): final hidden/cell state of EVERY layer -> (2, 3, 20) each.
# (The original unpacked this as `output, hn`, leaving hn a tuple.)
output, (hn, cn) = lstm(seq_input, (h0, c0))
import torch
from torch import nn


class RNN(nn.Module):
    """Many-to-one sequence classifier.

    Runs a (uni-directional) multi-layer LSTM over the input sequence and
    applies a linear layer to the hidden state of the last time step.
    """

    def __init__(self, input_size, hidden_size, num_layers, num_classes):
        super(RNN, self).__init__()
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # batch_first=True -> tensors are (batch, seq_len, feature).
        self.lstm = nn.LSTM(input_size, hidden_size, num_layers,
                            batch_first=True)
        # Plain linear head; the original "# 2 for bidirection" comment was
        # copied from a bidirectional variant and did not apply here.
        self.fc = nn.Linear(hidden_size, num_classes)

    def forward(self, x):
        # out: (batch, seq_len, hidden_size) — last layer's hidden states.
        # h0/c0 default to zeros when not supplied.
        out, _ = self.lstm(x)
        # Decode the hidden state of the last time step only.
        out = self.fc(out[:, -1, :])
        return out


# Example hyperparameters — the original snippet referenced these names
# without ever defining them.
input_size, hidden_size, num_layers, num_classes = 28, 128, 2, 10

rnn = RNN(input_size, hidden_size, num_layers, num_classes)
# Only move to GPU when one exists; the original called .cuda()
# unconditionally, which crashes on CPU-only machines.
if torch.cuda.is_available():
    rnn.cuda()
class torch.nn.LSTM(*args, **kwargs)[source]
将一个多层的 (LSTM) 应用到输入序列。
参数说明:
LSTM输入: input, (h_0, c_0)
LSTM输出: output, (h_n, c_n)
LSTM模型参数:
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。