
Character-Level Text Generation with an LSTM (PyTorch Implementation)

Table of Contents
  • Character-level text generation with an LSTM in PyTorch
  • Preface
  • 1. Dataset
  • 2. Code implementation
    • 1. Importing libraries and building the LSTM model
    • 2. Data preprocessing function
    • 3. Training function
    • 4. Prediction function
    • 5. Text generation function
    • 6. Main function
  • Complete code
  • Summary

Preface

This article covers a deep learning topic: building an LSTM model in PyTorch for a character-level text generation task.

1. Dataset

https://download.csdn.net/download/qq_52785473/78428834

2. Code Implementation

1. Importing libraries and building the LSTM model

The code is as follows:

# coding: utf-8
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
import torch.nn.functional as F


class lstm_model(nn.Module):
    def __init__(self, vocab, hidden_size, num_layers, dropout=0.5):
        super(lstm_model, self).__init__()
        self.vocab = vocab  # array of the unique characters (the vocabulary)
        # index -> character and character -> index lookups
        self.int_char = {i: char for i, char in enumerate(vocab)}
        self.char_int = {char: i for i, char in self.int_char.items()}
        # one-hot encoder fitted on the vocabulary (note: scikit-learn >= 1.2 renames sparse to sparse_output)
        self.encoder = OneHotEncoder(sparse=True).fit(vocab.reshape(-1, 1))
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # LSTM layer
        self.lstm = nn.LSTM(len(vocab), hidden_size, num_layers, batch_first=True, dropout=dropout)
        # fully connected output layer
        self.linear = nn.Linear(hidden_size, len(vocab))

    def forward(self, sequence, hs=None):
        out, hs = self.lstm(sequence, hs)  # LSTM output: (batch_size, sequence_length, hidden_size)
        out = out.reshape(-1, self.hidden_size)  # flatten to the linear layer's input shape: (batch_size * sequence_length, hidden_size)
        output = self.linear(out)  # linear output: (batch_size * sequence_length, vocab_size)
        return output, hs

    def onehot_encode(self, data):  # one-hot encode characters
        return self.encoder.transform(data)

    def onehot_decode(self, data):  # decode one-hot vectors back to characters
        return self.encoder.inverse_transform(data)

    def label_encode(self, data):  # map characters to integer labels
        return np.array([self.char_int[ch] for ch in data])

    def label_decode(self, data):  # map integer labels back to characters
        return np.array([self.int_char[ch] for ch in data])
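
As a quick sanity check of the shapes, here is a minimal sketch of my own (not part of the original post), assuming a made-up three-character vocabulary; the names toy_vocab and dummy_input are illustrative only.

# Illustrative shape check for the lstm_model class above.
toy_vocab = np.array(sorted(set("abcab")))        # -> array(['a', 'b', 'c'])
toy_model = lstm_model(toy_vocab, hidden_size=8, num_layers=2)

dummy_input = torch.zeros(4, 10, len(toy_vocab))  # (batch_size=4, seq_len=10, vocab_size=3)
logits, hs = toy_model(dummy_input)

print(logits.shape)  # torch.Size([40, 3]) -> (batch_size * seq_len, vocab_size)
print(hs[0].shape)   # torch.Size([2, 4, 8]) -> (num_layers, batch_size, hidden_size)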

2. Data preprocessing function

def get_batches(data, batch_size, seq_len):
    '''
    :param data: source data, shape (num_samples, num_features)
    :param batch_size: number of sequences per batch
    :param seq_len: length of each sequence (number of time steps)
    :return: batches of shape (batch_size, seq_len, num_features)
    '''
    num_features = data.shape[1]
    num_chars = batch_size * seq_len  # characters consumed by one batch
    num_batches = int(np.floor(data.shape[0] / num_chars))  # how many full batches fit into the data
    need_chars = num_batches * num_chars  # total number of characters actually used
    targets = np.vstack((data[1:].A, data[0].A))  # targets are the inputs shifted by one character; .A turns the sparse matrix into a dense numpy array, which is easier to reshape
    inputs = data[:need_chars].A.astype("int")  # keep only the characters needed for full batches
    targets = targets[:need_chars]
    targets = targets.reshape(batch_size, -1, num_features)
    inputs = inputs.reshape(batch_size, -1, num_features)
    for i in range(0, inputs.shape[1], seq_len):
        x = inputs[:, i: i + seq_len]
        y = targets[:, i: i + seq_len]
        yield x, y  # yield one batch at a time to save memory
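
To make the batch shapes concrete, the following is an illustrative driver of my own (not from the original post); it encodes a short repeated string the same way train() does and prints the first batch's shapes.

# Illustrative only: check what get_batches yields, assuming the class and function above.
sample_text = np.array(list("hello world, hello lstm. " * 40))
sample_vocab = np.array(sorted(set(sample_text)))
sample_model = lstm_model(sample_vocab, hidden_size=16, num_layers=2)

encoded = sample_model.onehot_encode(sample_text.reshape(-1, 1))  # sparse matrix, shape (num_chars, vocab_size)
for x, y in get_batches(encoded, batch_size=4, seq_len=20):
    print(x.shape, y.shape)  # both (4, 20, vocab_size)
    break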

3. Training function

def train(model, data, batch_size, seq_len, epochs, lr=0.01, valid=None):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    if valid is not None:
        data = model.onehot_encode(data.reshape(-1, 1))
        valid = model.onehot_encode(valid.reshape(-1, 1))
    else:
        data = model.onehot_encode(data.reshape(-1, 1))
    train_loss = []
    val_loss = []
    for epoch in range(epochs):
        model.train()
        hs = None  # hidden state (h, c); None lets the LSTM initialize it with zeros
        train_ls = 0.0
        val_ls = 0.0
        for x, y in get_batches(data, batch_size, seq_len):
            optimizer.zero_grad()
            x = torch.tensor(x).float().to(device)
            out, hs = model(x, hs)
            hs = tuple(h.data for h in hs)  # detach the hidden state so gradients don't flow across batches
            y = y.reshape(-1, len(model.vocab))
            y = model.onehot_decode(y)  # one-hot vectors -> characters
            y = model.label_encode(y.squeeze())  # characters -> integer class labels
            y = torch.from_numpy(y).long().to(device)
            loss = criterion(out, y.squeeze())
            loss.backward()
            optimizer.step()
            train_ls += loss.item()
        if valid is not None:
            model.eval()
            hs = None
            with torch.no_grad():
                for x, y in get_batches(valid, batch_size, seq_len):
                    x = torch.tensor(x).float().to(device)  # one validation batch of batch_size * seq_len characters
                    out, hs = model(x, hs)
                    # out has shape (batch_size * seq_len, vocab_size)
                    hs = tuple(h.data for h in hs)  # detach the hidden state
                    y = y.reshape(-1, len(model.vocab))  # y is (batch_size, seq_len, vocab_size), e.g. (128, 100, 43); flatten so each row is one character
                    y = model.onehot_decode(y)  # each label is the next character; decode the one-hot rows back to characters
                    # at this point y is a 2-D array of shape (n, 1)
                    y = model.label_encode(y.squeeze())  # squeeze to 1-D, then map each character to its index in the vocabulary
                    y = torch.from_numpy(y).long().to(device)
                    # y is already 1-D here, so the extra squeeze() below is a no-op
                    loss = criterion(out, y.squeeze())  # validation loss
                    val_ls += loss.item()
            val_loss.append(np.mean(val_ls))
        train_loss.append(np.mean(train_ls))
        print("train_loss:", train_ls)
    plt.plot(train_loss, label="train_loss")
    plt.plot(val_loss, label="val loss")
    plt.title("loss vs epoch")
    plt.legend()
    plt.show()
    model_name = "lstm_model.net"
    with open(model_name, 'wb') as f:  # save the trained model
        torch.save(model.state_dict(), f)

4. Prediction function

def predict(model, char, top_k=None, hidden_size=None):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    model.eval()  # switch to evaluation mode
    with torch.no_grad():
        char = np.array([char])  # given one character, predict the next; wrap it in a numpy array
        char = char.reshape(-1, 1)  # make it 2-D, as the encoder expects
        char_encoding = model.onehot_encode(char).A  # one-hot encode and densify the sparse matrix for reshaping
        char_encoding = char_encoding.reshape(1, 1, -1)  # shape (1, 1, vocab_size): the 3-D input the model expects
        char_tensor = torch.tensor(char_encoding, dtype=torch.float32)  # convert to a tensor
        char_tensor = char_tensor.to(device)
        out, hidden_size = model(char_tensor, hidden_size)  # run the model; out holds the logits
        probs = F.softmax(out, dim=1).squeeze()  # probability of each character in the vocabulary
        if top_k is None:  # sample from the full vocabulary
            indices = np.arange(len(model.vocab))
        else:  # keep only the top_k most likely characters
            probs, indices = probs.topk(top_k)
            indices = indices.cpu().numpy()
        probs = probs.cpu().numpy()
        char_index = np.random.choice(indices, p=probs / probs.sum())  # sample one character index according to the probabilities
        char = model.int_char[char_index]  # look up the predicted character by index
        return char, hidden_size
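
The top_k branch just renormalizes the k largest probabilities before sampling; here is a tiny standalone illustration of that step (my own numbers, not from the original code):

# Standalone illustration of the top-k sampling used in predict().
probs = torch.tensor([0.05, 0.40, 0.10, 0.30, 0.15])  # softmax output over a 5-character vocabulary
top_probs, top_indices = probs.topk(3)                # keep the 3 most likely characters: indices 1, 3, 4
top_probs = top_probs.numpy()
top_indices = top_indices.numpy()
next_index = np.random.choice(top_indices, p=top_probs / top_probs.sum())
print(next_index)  # 1, 3 or 4, drawn with probabilities 0.40/0.85, 0.30/0.85 and 0.15/0.85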

5. Text generation function

def sample(model, length, top_k=None, sentence="c"):
    hidden_size = None
    new_sentence = [char for char in sentence]
    for i in range(length):
        next_char, hidden_size = predict(model, new_sentence[-1], top_k=top_k, hidden_size=hidden_size)
        new_sentence.append(next_char)
    return "".join(new_sentence)

6. Main function

def main():
    hidden_size = 512
    num_layers = 2
    batch_size = 128
    seq_len = 100
    epochs = 2
    lr = 0.01
    f = pd.read_csv("../datasets/dev.tsv", sep="\t", header=None)
    f = f[0]  # take the first column
    text = list(f)
    text = ".".join(text)
    vocab = np.array(sorted(set(text)))  # build the character vocabulary
    vocab_size = len(vocab)
    val_len = int(np.floor(0.2 * len(text)))  # hold out 20% of the text for validation
    trainset = np.array(list(text[:-val_len]))
    validset = np.array(list(text[-val_len:]))
    model = lstm_model(vocab, hidden_size, num_layers)  # instantiate the model
    train(model, trainset, batch_size, seq_len, epochs, lr=lr, valid=validset)  # train the model
    model.load_state_dict(torch.load("lstm_model.net"))  # load the saved weights
    new_text = sample(model, 100, top_k=5)  # generate 100 characters, sampling from the 5 most likely predictions at each step
    print(new_text)  # print the generated text


if __name__ == "__main__":
    main()

This code still leaves plenty of room for improvement, for example moving to word-level text generation, or introducing word embeddings such as word2vec, either of which could give the model better results; a sketch of the embedding idea follows.
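
A minimal sketch of that embedding idea, using my own illustrative names (embed_lstm_model, embed_dim) that are not part of the original code: the one-hot input is replaced with an nn.Embedding layer, which could be initialized from pretrained word2vec vectors for word-level generation.

# Sketch only: an LSTM over learned embeddings instead of one-hot vectors.
class embed_lstm_model(nn.Module):
    def __init__(self, vocab_size, embed_dim, hidden_size, num_layers, dropout=0.5):
        super().__init__()
        self.embedding = nn.Embedding(vocab_size, embed_dim)  # could be loaded with word2vec weights via nn.Embedding.from_pretrained
        self.lstm = nn.LSTM(embed_dim, hidden_size, num_layers, batch_first=True, dropout=dropout)
        self.linear = nn.Linear(hidden_size, vocab_size)

    def forward(self, token_ids, hs=None):  # token_ids: (batch_size, seq_len) integer indices
        emb = self.embedding(token_ids)      # (batch_size, seq_len, embed_dim)
        out, hs = self.lstm(emb, hs)         # (batch_size, seq_len, hidden_size)
        return self.linear(out.reshape(-1, self.lstm.hidden_size)), hs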

Complete code

# coding: utf-8
import torch
import torch.nn as nn
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.preprocessing import OneHotEncoder
import torch.nn.functional as F


class lstm_model(nn.Module):
    def __init__(self, vocab, hidden_size, num_layers, dropout=0.5):
        super(lstm_model, self).__init__()
        self.vocab = vocab  # array of the unique characters (the vocabulary)
        # index -> character and character -> index lookups
        self.int_char = {i: char for i, char in enumerate(vocab)}
        self.char_int = {char: i for i, char in self.int_char.items()}
        # one-hot encoder fitted on the vocabulary (note: scikit-learn >= 1.2 renames sparse to sparse_output)
        self.encoder = OneHotEncoder(sparse=True).fit(vocab.reshape(-1, 1))
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # LSTM layer
        self.lstm = nn.LSTM(len(vocab), hidden_size, num_layers, batch_first=True, dropout=dropout)
        # fully connected output layer
        self.linear = nn.Linear(hidden_size, len(vocab))

    def forward(self, sequence, hs=None):
        out, hs = self.lstm(sequence, hs)  # LSTM output: (batch_size, sequence_length, hidden_size)
        out = out.reshape(-1, self.hidden_size)  # flatten to the linear layer's input shape: (batch_size * sequence_length, hidden_size)
        output = self.linear(out)  # linear output: (batch_size * sequence_length, vocab_size)
        return output, hs

    def onehot_encode(self, data):
        return self.encoder.transform(data)

    def onehot_decode(self, data):
        return self.encoder.inverse_transform(data)

    def label_encode(self, data):
        return np.array([self.char_int[ch] for ch in data])

    def label_decode(self, data):
        return np.array([self.int_char[ch] for ch in data])


def get_batches(data, batch_size, seq_len):
    '''
    :param data: source data, shape (num_samples, num_features)
    :param batch_size: number of sequences per batch
    :param seq_len: length of each sequence (number of time steps)
    :return: batches of shape (batch_size, seq_len, num_features)
    '''
    num_features = data.shape[1]
    num_chars = batch_size * seq_len  # characters consumed by one batch
    num_batches = int(np.floor(data.shape[0] / num_chars))  # how many full batches fit into the data
    need_chars = num_batches * num_chars  # total number of characters actually used
    targets = np.vstack((data[1:].A, data[0].A))  # targets are the inputs shifted by one character; .A turns the sparse matrix into a dense numpy array
    inputs = data[:need_chars].A.astype("int")  # keep only the characters needed for full batches
    targets = targets[:need_chars]
    targets = targets.reshape(batch_size, -1, num_features)
    inputs = inputs.reshape(batch_size, -1, num_features)
    for i in range(0, inputs.shape[1], seq_len):
        x = inputs[:, i: i + seq_len]
        y = targets[:, i: i + seq_len]
        yield x, y  # yield one batch at a time to save memory


def train(model, data, batch_size, seq_len, epochs, lr=0.01, valid=None):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model = model.to(device)
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    criterion = nn.CrossEntropyLoss()
    if valid is not None:
        data = model.onehot_encode(data.reshape(-1, 1))
        valid = model.onehot_encode(valid.reshape(-1, 1))
    else:
        data = model.onehot_encode(data.reshape(-1, 1))
    train_loss = []
    val_loss = []
    for epoch in range(epochs):
        model.train()
        hs = None  # hidden state (h, c); None lets the LSTM initialize it with zeros
        train_ls = 0.0
        val_ls = 0.0
        for x, y in get_batches(data, batch_size, seq_len):
            optimizer.zero_grad()
            x = torch.tensor(x).float().to(device)
            out, hs = model(x, hs)
            hs = tuple(h.data for h in hs)  # detach the hidden state so gradients don't flow across batches
            y = y.reshape(-1, len(model.vocab))
            y = model.onehot_decode(y)
            y = model.label_encode(y.squeeze())
            y = torch.from_numpy(y).long().to(device)
            loss = criterion(out, y.squeeze())
            loss.backward()
            optimizer.step()
            train_ls += loss.item()
        if valid is not None:
            model.eval()
            hs = None
            with torch.no_grad():
                for x, y in get_batches(valid, batch_size, seq_len):
                    x = torch.tensor(x).float().to(device)  # one validation batch of batch_size * seq_len characters
                    out, hs = model(x, hs)
                    # out has shape (batch_size * seq_len, vocab_size)
                    hs = tuple(h.data for h in hs)  # detach the hidden state
                    y = y.reshape(-1, len(model.vocab))  # flatten (batch_size, seq_len, vocab_size) so each row is one character
                    y = model.onehot_decode(y)  # each label is the next character; decode the one-hot rows back to characters
                    y = model.label_encode(y.squeeze())  # squeeze to 1-D, then map each character to its index in the vocabulary
                    y = torch.from_numpy(y).long().to(device)
                    loss = criterion(out, y.squeeze())  # validation loss
                    val_ls += loss.item()
            val_loss.append(np.mean(val_ls))
        train_loss.append(np.mean(train_ls))
        print("train_loss:", train_ls)
    plt.plot(train_loss, label="train_loss")
    plt.plot(val_loss, label="val loss")
    plt.title("loss vs epoch")
    plt.legend()
    plt.show()
    model_name = "lstm_model.net"
    with open(model_name, 'wb') as f:  # save the trained model
        torch.save(model.state_dict(), f)


def predict(model, char, top_k=None, hidden_size=None):
    device = 'cuda' if torch.cuda.is_available() else 'cpu'
    model.to(device)
    model.eval()  # switch to evaluation mode
    with torch.no_grad():
        char = np.array([char])  # given one character, predict the next; wrap it in a numpy array
        char = char.reshape(-1, 1)  # make it 2-D, as the encoder expects
        char_encoding = model.onehot_encode(char).A  # one-hot encode and densify the sparse matrix for reshaping
        char_encoding = char_encoding.reshape(1, 1, -1)  # shape (1, 1, vocab_size): the 3-D input the model expects
        char_tensor = torch.tensor(char_encoding, dtype=torch.float32)
        char_tensor = char_tensor.to(device)
        out, hidden_size = model(char_tensor, hidden_size)  # run the model; out holds the logits
        probs = F.softmax(out, dim=1).squeeze()  # probability of each character in the vocabulary
        if top_k is None:  # sample from the full vocabulary
            indices = np.arange(len(model.vocab))
        else:  # keep only the top_k most likely characters
            probs, indices = probs.topk(top_k)
            indices = indices.cpu().numpy()
        probs = probs.cpu().numpy()
        char_index = np.random.choice(indices, p=probs / probs.sum())  # sample one character index according to the probabilities
        char = model.int_char[char_index]  # look up the predicted character by index
        return char, hidden_size


def sample(model, length, top_k=None, sentence="c"):
    hidden_size = None
    new_sentence = [char for char in sentence]
    for i in range(length):
        next_char, hidden_size = predict(model, new_sentence[-1], top_k=top_k, hidden_size=hidden_size)
        new_sentence.append(next_char)
    return "".join(new_sentence)


def main():
    hidden_size = 512
    num_layers = 2
    batch_size = 128
    seq_len = 100
    epochs = 2
    lr = 0.01
    f = pd.read_csv("../datasets/dev.tsv", sep="\t", header=None)
    f = f[0]  # take the first column
    text = list(f)
    text = ".".join(text)
    vocab = np.array(sorted(set(text)))  # build the character vocabulary
    vocab_size = len(vocab)
    val_len = int(np.floor(0.2 * len(text)))  # hold out 20% of the text for validation
    trainset = np.array(list(text[:-val_len]))
    validset = np.array(list(text[-val_len:]))
    model = lstm_model(vocab, hidden_size, num_layers)  # instantiate the model
    train(model, trainset, batch_size, seq_len, epochs, lr=lr, valid=validset)  # train the model
    model.load_state_dict(torch.load("lstm_model.net"))  # load the saved weights
    new_text = sample(model, 100, top_k=5)  # generate 100 characters, sampling from the 5 most likely predictions at each step
    print(new_text)  # print the generated text


if __name__ == "__main__":
    main()

Summary

In this example's preprocessing, each input sequence is paired with a whole target sequence rather than a single character: for instance, the input abcd gets the target bcda (the text shifted by one position). Later operations then flatten this back into a one-character-to-one-character mapping, so at prediction time the model only ever predicts the next character from a single input character. That loses some of what makes a recurrent network powerful: the relationship between consecutive characters is cut, and the task degenerates into an ordinary classification problem.

Because a recurrent network exploits the order of a sequence, we usually set a sliding-window size (a number of time steps) whose job is to preserve that sequential structure. For example, with training text abcdef and a window of 3, the natural split is abc→d, bcd→e, cde→f: three consecutive characters are the input and the character that follows them is the label. The trained model then takes three characters and generates one; the generated character plus the two characters before it becomes the next input, so the window slides forward one step at a time, always keeping the size of 3 we chose at the start (a short illustration follows).
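
As a concrete illustration of that split (a toy example of my own, not from the original code):

# Toy illustration of the sliding-window split described above.
text = "abcdef"
window = 3
pairs = [(text[i:i + window], text[i + window]) for i in range(len(text) - window)]
print(pairs)  # [('abc', 'd'), ('bcd', 'e'), ('cde', 'f')]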

So although this example sets seq_len=100, i.e. a window of 100, its training data is not actually built that way, and at prediction time the model still predicts the next character from a single character, so the sliding-window idea is never really used. Readers can take this code as a starting point and improve it themselves.

Below are the changes to parts of the code that make it train and predict in this sliding-window fashion.

Data preprocessing function

def get_batches(data, batch_size, seq_len):
    '''
    :param data: source data, shape (num_samples, num_features)
    :param batch_size: number of windows per batch
    :param seq_len: sliding-window length
    :return: (batch_size, seq_len, num_features)
    '''
    num_features = data.shape[1]
    num_chars = batch_size * seq_len  # characters consumed by one batch
    num_batches = int(np.floor(data.shape[0] / num_chars))  # how many full batches fit into the data
    need_chars = num_batches * num_chars  # total number of characters actually used
    targets = np.vstack((data[1:].A, data[0].A))  # shifted targets; .A densifies the sparse matrix (not used in this variant, the labels are built below)
    inputs = data[:need_chars].A.astype("int")  # keep only the characters needed for full batches
    targets = targets[:need_chars]
    train_data = np.zeros((inputs.shape[0] - seq_len, seq_len, num_features))
    train_label = np.zeros((inputs.shape[0] - seq_len, num_features))
    for i in range(0, inputs.shape[0] - seq_len, 1):
        # inputs has shape (number of characters, one-hot vector size)
        # the idea: for abcd with a window of 2, the pairs are ab->c and bc->d
        train_data[i] = inputs[i:i + seq_len]  # one window of seq_len (here 100) characters
        train_label[i] = inputs[i + seq_len]  # the label is the character immediately after the window
    print(train_data.shape)
    print(train_label.shape)
    for i in range(0, inputs.shape[0] - seq_len, batch_size):
        x = train_data[i:i + batch_size]  # batch_size (here 128) windows are trained together per parameter update
        y = train_label[i:i + batch_size]  # the corresponding 128 labels
        print(x.shape)
        print(y.shape)
        print("-----------")
        yield x, y

Model construction

class lstm_model(nn.Module):
    def __init__(self, vocab, hidden_size, num_layers, dropout=0.5, seq_len=100):
        super(lstm_model, self).__init__()
        self.seq_len = seq_len
        self.vocab = vocab  # array of the unique characters (the vocabulary)
        # index -> character and character -> index lookups
        self.int_char = {i: char for i, char in enumerate(vocab)}
        self.char_int = {char: i for i, char in self.int_char.items()}
        # one-hot encoder fitted on the vocabulary
        self.encoder = OneHotEncoder(sparse=True).fit(vocab.reshape(-1, 1))
        self.hidden_size = hidden_size
        self.num_layers = num_layers
        # LSTM layer
        self.lstm = nn.LSTM(len(vocab), hidden_size, num_layers, batch_first=True, dropout=dropout)
        # fully connected output layer
        self.linear = nn.Linear(hidden_size, len(vocab))

    def forward(self, sequence, hs=None):
        # print("forward:", sequence.shape)
        out, hs = self.lstm(sequence, hs)  # LSTM output: (batch_size, sequence_length, hidden_size)
        print("----", out.shape)
        # out = out.reshape(-1, self.hidden_size)  # no longer flattened: we only need the last time step
        print("========", out[:, -1].shape)
        output = self.linear(out[:, -1])  # take only the last time step, shape (batch_size, hidden_size): one prediction per window
        print("output-----:", output.shape)
        return output, hs

    def onehot_encode(self, data):
        return self.encoder.transform(data)

    def onehot_decode(self, data):
        return self.encoder.inverse_transform(data)

    def label_encode(self, data):
        return np.array([self.char_int[ch] for ch in data])

    def label_decode(self, data):
        return np.array([self.int_char[ch] for ch in data])
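
A quick shape check of this modified forward pass (an illustration of my own, assuming the modified class above): each window of seq_len characters now yields exactly one prediction.

# Illustrative shape check for the sliding-window variant.
toy_vocab = np.array(sorted(set("abc")))
toy_model = lstm_model(toy_vocab, hidden_size=8, num_layers=2, seq_len=5)

window_batch = torch.zeros(4, 5, len(toy_vocab))  # (batch_size=4, seq_len=5, vocab_size=3)
logits, hs = toy_model(window_batch)
print(logits.shape)  # torch.Size([4, 3]): one next-character prediction per window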
