
Hands-On Text Classification with an RNN Model

  • Hands-on text classification task

  • Dataset construction: sentiment analysis (a classification task) on a movie-review dataset
  • Word-vector model: load pre-trained word vectors or train your own
  • Sequence model: train an RNN to do the classification
import os
import warnings
warnings.filterwarnings("ignore")
import tensorflow as tf
import numpy as np
import pprint
import logging
import time
from collections import Counter
from pathlib import Path
from tqdm import tqdm
# Load the IMDB movie-review dataset (it can also be downloaded manually and placed in the corresponding location)
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.imdb.load_data()
print(x_train.shape)

# Word-to-ID mapping table; the first 3 indices are reserved for special tokens
_word2idx = tf.keras.datasets.imdb.get_word_index()
word2idx = {w: i + 3 for w, i in _word2idx.items()}
word2idx['<pad>'] = 0
word2idx['<start>'] = 1
word2idx['<unk>'] = 2
idx2word = {i: w for w, i in word2idx.items()}
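# (Optional sanity check, not part of the original pipeline: decode the first training review
#  back to text with idx2word to confirm the index offsets line up as expected.)
print(' '.join(idx2word.get(i, '<unk>') for i in x_train[0][:10]))
print('label:', y_train[0])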
# Sort the samples by text length
def sort_by_len(x, y):
    x, y = np.asarray(x), np.asarray(y)
    idx = sorted(range(len(x)), key=lambda i: len(x[i]))
    return x[idx], y[idx]

# Save the intermediate result to disk as text (not IDs), so a crash does not force a full restart
x_train, y_train = sort_by_len(x_train, y_train)
x_test, y_test = sort_by_len(x_test, y_test)

def write_file(f_path, xs, ys):
    with open(f_path, 'w', encoding='utf-8') as f:
        for x, y in zip(xs, ys):
            # one sample per line: "label<TAB>word word word ..." (the leading <start> token is dropped)
            f.write(str(y) + '\t' + ' '.join([idx2word[i] for i in x][1:]) + '\n')

Path('./data').mkdir(exist_ok=True)  # make sure the output directory exists
write_file('./data/train.txt', x_train, y_train)
write_file('./data/test.txt', x_test, y_test)
# Build the vocabulary, filtered by word frequency
counter = Counter()
with open('./data/train.txt', encoding='utf-8') as f:
    for line in f:
        line = line.rstrip()
        label, words = line.split('\t')
        words = words.split(' ')
        counter.update(words)

words = ['<pad>'] + [w for w, freq in counter.most_common() if freq >= 10]
print('Vocab Size:', len(words))

Path('./vocab').mkdir(exist_ok=True)
with open('./vocab/word.txt', 'w', encoding='utf-8') as f:
    for w in words:
        f.write(w + '\n')

# Build the new word2idx mapping from the saved vocabulary
word2idx = {}
with open('./vocab/word.txt', encoding='utf-8') as f:
    for i, line in enumerate(f):
        line = line.rstrip()
        word2idx[line] = i
# Embedding layer
# Embeddings can be trained as part of the network or loaded from a pre-trained model; loading pre-trained vectors is the usual choice.
# Commonly used vectors: https://nlp.stanford.edu/projects/glove/
# Build one big lookup table: 20,598 distinct words, shape [20599, 50]
embedding = np.zeros((len(word2idx) + 1, 50))  # the extra +1 row is used for out-of-vocabulary (unknown) words

with open('./data/glove.6B.50d.txt', encoding='utf-8') as f:  # downloaded GloVe file; each line is a word followed by 50 floats
    count = 0
    for i, line in enumerate(f):
        if i % 100000 == 0:
            print('- At line {}'.format(i))  # progress
        line = line.rstrip()
        sp = line.split(' ')
        word, vec = sp[0], sp[1:]
        if word in word2idx:
            count += 1
            embedding[word2idx[word]] = np.asarray(vec, dtype='float32')  # fill in the pre-trained vector for this word

print("[%d / %d] words have found pre-trained values" % (count, len(word2idx)))
np.save('./vocab/word.npy', embedding)
print('Saved ./vocab/word.npy')
# Build the training data
# Note: every input sample must have the same shape (text length, embedding dimension, etc.)
# Data generator:
# tf.data.Dataset.from_tensor_slices(tensor): slices the tensor along its first dimension and returns a dataset of N samples;
#   the drawback is that the whole dataset must be passed in at once and sliced in memory, which is memory-hungry.
# tf.data.Dataset.from_generator(generator, output_types, output_shapes): keeps reading samples lazily from a generator instead.
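# (Illustration only, not used by the rest of the code: the two construction styles side by side.
#  The tiny tensors here are made-up examples.)
ds_in_memory = tf.data.Dataset.from_tensor_slices(([[1, 2], [3, 4]], [0, 1]))   # whole data held in memory
ds_streamed = tf.data.Dataset.from_generator(lambda: iter([([1, 2], 0), ([3, 4], 1)]),
                                             output_types=(tf.int32, tf.int32),
                                             output_shapes=([2], ()))           # read sample by sample from a generator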
def data_generator(f_path, params):
    with open(f_path, encoding='utf-8') as f:
        print('Reading', f_path)
        for line in f:
            line = line.rstrip()
            label, text = line.split('\t')
            text = text.split(' ')
            x = [params['word2idx'].get(w, len(word2idx)) for w in text]  # map each word to its ID; unknown words get the last ID
            if len(x) >= params['max_len']:  # truncate
                x = x[:params['max_len']]
            else:
                x += [0] * (params['max_len'] - len(x))  # pad with <pad> (ID 0)
            y = int(label)
            yield x, y
def dataset(is_training, params):
    _shapes = ([params['max_len']], ())
    _types = (tf.int32, tf.int32)
    if is_training:
        ds = tf.data.Dataset.from_generator(
            lambda: data_generator(params['train_path'], params),
            output_shapes=_shapes,
            output_types=_types,)
        ds = ds.shuffle(params['num_samples'])
        ds = ds.batch(params['batch_size'])
        ds = ds.prefetch(tf.data.experimental.AUTOTUNE)  # prefetch batches; the parallelism is tuned automatically to the available CPU, which speeds things up
    else:
        ds = tf.data.Dataset.from_generator(
            lambda: data_generator(params['test_path'], params),
            output_shapes=_shapes,
            output_types=_types,)
        ds = ds.batch(params['batch_size'])
        ds = ds.prefetch(tf.data.experimental.AUTOTUNE)
    return ds
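# (Optional sanity check, runnable once `params` has been filled in further below:
#  texts should come out with shape (batch_size, max_len) and labels with shape (batch_size,).)
# for texts, labels in dataset(is_training=False, params=params):
#     print(texts.shape, labels.shape)   # e.g. (32, 1000) (32,)
#     break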
# Custom model
# Declare the layers in __init__, then wire them up in a single forward pass in call()
class Model(tf.keras.Model):
    def __init__(self, params):
        super().__init__()
        self.embedding = tf.Variable(np.load('./vocab/word.npy'),
                                     dtype=tf.float32,
                                     name='pretrained_embedding',
                                     trainable=False,)
        self.drop1 = tf.keras.layers.Dropout(params['dropout_rate'])
        self.drop2 = tf.keras.layers.Dropout(params['dropout_rate'])
        self.drop3 = tf.keras.layers.Dropout(params['dropout_rate'])
        self.rnn1 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(params['rnn_units'], return_sequences=True))
        self.rnn2 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(params['rnn_units'], return_sequences=True))
        self.rnn3 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(params['rnn_units'], return_sequences=False))
        self.drop_fc = tf.keras.layers.Dropout(params['dropout_rate'])
        self.fc = tf.keras.layers.Dense(2 * params['rnn_units'], tf.nn.elu)
        self.out_linear = tf.keras.layers.Dense(2)

    def call(self, inputs, training=False):
        if inputs.dtype != tf.int32:
            inputs = tf.cast(inputs, tf.int32)
        x = tf.nn.embedding_lookup(self.embedding, inputs)  # (batch, max_len, 50)
        x = self.drop1(x, training=training)
        x = self.rnn1(x)
        x = self.drop2(x, training=training)
        x = self.rnn2(x)
        x = self.drop3(x, training=training)
        x = self.rnn3(x)        # return_sequences=False: only the last output, (batch, 2 * rnn_units)
        x = self.drop_fc(x, training=training)
        x = self.fc(x)
        x = self.out_linear(x)  # 2 logits, one per class
        return x
# Version 2: runs faster
class Model2(tf.keras.Model):
    def __init__(self, params):
        super().__init__()
        self.embedding = tf.Variable(np.load('./vocab/word.npy'),
                                     dtype=tf.float32,
                                     name='pretrained_embedding',
                                     trainable=False,)
        self.drop1 = tf.keras.layers.Dropout(params['dropout_rate'])
        self.drop2 = tf.keras.layers.Dropout(params['dropout_rate'])
        self.drop3 = tf.keras.layers.Dropout(params['dropout_rate'])
        self.rnn1 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(params['rnn_units'], return_sequences=True))
        self.rnn2 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(params['rnn_units'], return_sequences=True))
        self.rnn3 = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(params['rnn_units'], return_sequences=True))
        self.drop_fc = tf.keras.layers.Dropout(params['dropout_rate'])
        self.fc = tf.keras.layers.Dense(2 * params['rnn_units'], tf.nn.elu)
        self.out_linear = tf.keras.layers.Dense(2)

    def call(self, inputs, training=False):
        if inputs.dtype != tf.int32:
            inputs = tf.cast(inputs, tf.int32)
        batch_sz = tf.shape(inputs)[0]
        rnn_units = 2 * params['rnn_units']   # output width of a bidirectional LSTM (relies on the global params)

        x = tf.nn.embedding_lookup(self.embedding, inputs)   # (batch, 1000, 50)
        x = tf.reshape(x, (batch_sz * 10 * 10, 10, 50))      # requires max_len == 1000 and 50-d embeddings
        x = self.drop1(x, training=training)
        x = self.rnn1(x)
        x = tf.reduce_max(x, 1)                               # max-pool over the 10 time steps
        x = tf.reshape(x, (batch_sz * 10, 10, rnn_units))
        x = self.drop2(x, training=training)
        x = self.rnn2(x)
        x = tf.reduce_max(x, 1)
        x = tf.reshape(x, (batch_sz, 10, rnn_units))
        x = self.drop3(x, training=training)
        x = self.rnn3(x)
        x = tf.reduce_max(x, 1)
        x = self.drop_fc(x, training=training)
        x = self.fc(x)
        x = self.out_linear(x)
        return x
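# (Why the reshapes in Model2 are legal: with the parameters set below, max_len is 1000, the GloVe
#  vectors are 50-dimensional, and 1000 = 10 * 10 * 10. The shapes therefore flow as:
#      (batch, 1000, 50) -> (batch*100, 10, 50) -> rnn1 + max-pool -> (batch*100, 2*rnn_units)
#      -> (batch*10, 10, 2*rnn_units) -> rnn2 + max-pool -> (batch*10, 2*rnn_units)
#      -> (batch, 10, 2*rnn_units)   -> rnn3 + max-pool -> (batch, 2*rnn_units)
#  Each LSTM only ever unrolls over 10 time steps instead of 1000, which is why this version is faster.
#  If max_len or the embedding size changes, the hard-coded 10s and 50 must be adjusted to match.)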
### Set the parameters
params = {
    'vocab_path': './vocab/word.txt',
    'train_path': './data/train.txt',
    'test_path': './data/test.txt',
    'num_samples': 25000,
    'num_labels': 2,
    'batch_size': 32,
    'max_len': 1000,
    'rnn_units': 200,
    'dropout_rate': 0.2,
    'clip_norm': 10.,
    'num_patience': 3,
    'lr': 3e-4,
}
# Used to decide early stopping: True if accuracy kept falling over the last num_patience epochs
def is_descending(history: list):
    history = history[-(params['num_patience'] + 1):]
    for i in range(1, len(history)):
        if history[i - 1] <= history[i]:
            return False
    return True
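# (Example, assuming num_patience = 3 as set above:
#  is_descending([0.80, 0.79, 0.78, 0.77]) -> True   (accuracy fell 3 epochs in a row, so stop)
#  is_descending([0.80, 0.79, 0.81, 0.77]) -> False  (there was still an improvement inside the window))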
# Reload the vocabulary and finish filling in params
word2idx = {}
with open(params['vocab_path'], encoding='utf-8') as f:
    for i, line in enumerate(f):
        line = line.rstrip()
        word2idx[line] = i
params['word2idx'] = word2idx
params['vocab_size'] = len(word2idx) + 1

model = Model(params)
model.build(input_shape=(None, None))  # declare the input shape; alternatively fit() can infer it automatically
# pprint.pprint([(v.name, v.shape) for v in model.trainable_variables])
# Learning-rate schedule: https://tensorflow.google.cn/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay?version=stable
# returns initial_learning_rate * decay_rate ^ (step / decay_steps)
decay_lr = tf.optimizers.schedules.ExponentialDecay(params['lr'], 1000, 0.95)  # exponential decay of the learning rate
optim = tf.optimizers.Adam(params['lr'])
global_step = 0

history_acc = []
best_acc = .0

t0 = time.time()
logger = logging.getLogger('tensorflow')
logger.setLevel(logging.INFO)
while True:
    # Train for one epoch
    for texts, labels in dataset(is_training=True, params=params):
        with tf.GradientTape() as tape:  # the gradient tape records every op in its context, so .gradient() can differentiate any tensor computed inside it
            logits = model(texts, training=True)
            loss = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
            loss = tf.reduce_mean(loss)

        optim.lr.assign(decay_lr(global_step))
        grads = tape.gradient(loss, model.trainable_variables)
        grads, _ = tf.clip_by_global_norm(grads, params['clip_norm'])  # clip the gradients so a single step cannot update too aggressively
        optim.apply_gradients(zip(grads, model.trainable_variables))   # apply the update

        if global_step % 50 == 0:
            logger.info("Step {} | Loss: {:.4f} | Spent: {:.1f} secs | LR: {:.6f}".format(
                global_step, loss.numpy().item(), time.time() - t0, optim.lr.numpy().item()))
            t0 = time.time()
        global_step += 1

    # Evaluate on the test set
    m = tf.keras.metrics.Accuracy()
    for texts, labels in dataset(is_training=False, params=params):
        logits = model(texts, training=False)
        y_pred = tf.argmax(logits, axis=-1)
        m.update_state(y_true=labels, y_pred=y_pred)

    acc = m.result().numpy()
    logger.info("Evaluation: Testing Accuracy: {:.3f}".format(acc))
    history_acc.append(acc)

    if acc > best_acc:
        best_acc = acc
    logger.info("Best Accuracy: {:.3f}".format(best_acc))

    if len(history_acc) > params['num_patience'] and is_descending(history_acc):
        logger.info("Testing Accuracy not improved over {} epochs, Early Stop".format(params['num_patience']))
        break
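# (A minimal inference sketch, not part of the original post: once training has stopped, the same
#  word2idx / padding scheme can be reused to score a new review. `review` is a made-up example.)
def predict_sentiment(review, model, params):
    tokens = review.lower().split(' ')
    x = [params['word2idx'].get(w, len(params['word2idx'])) for w in tokens]
    x = x[:params['max_len']] + [0] * max(0, params['max_len'] - len(x))   # truncate / pad exactly like data_generator
    logits = model(tf.constant([x], dtype=tf.int32), training=False)
    return ['negative', 'positive'][int(tf.argmax(logits, axis=-1)[0])]

print(predict_sentiment('this movie was surprisingly good and touching', model, params))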
