【前沿重器】
A new column where I discuss what I've been learning recently and share my takeaways, so we can grow together. The tagline: a Cangjie (仓颉) special — planes and cannons I can build, and I've still got sharp tools and inner techniques to spare.
Previous posts
前沿重器[1] | Microsoft XiaoIce: a pioneer of multi-turn and emotional chatbots
前沿重器[2] | Meituan's search understanding and recall
前沿重器[3] | Ping An's intelligent question-answering system
心法利器[1] | Thoughts on knowledge distillation in NLP
心法利器[2] | Reflections on using statistical language models
心法利器[3] | tf.keras self-study notes
A few days ago I learned tf.keras, and to strike while the iron is hot I set up a text classification task I know well to get some hands-on practice. The results themselves aren't that important; what matters is getting the pipeline to run end to end, so everything is kept simple.
Of course, I didn't want something as simple as just wiring up a flow with noodle code. I wanted to practice writing a complete, properly engineered codebase. Two days of work, all hand-typed — criticism from veterans is welcome, and I hope it's useful to you.
Quick table of contents:
Let me start with the algorithmic view of the overall solution. The standard baseline for text classification is TextCNN; for simplicity I only used a single convolution layer rather than the full multi-kernel structure of TextCNN (for a detailed explanation of that model, see NLP.TM[24] | My take on TextCNN).
First, the folder structure of the core code:
The base_pipline.py here is a complete end-to-end script of nearly 180 lines covering the whole flow: loading data, preprocessing, training, and testing. Of course I wasn't satisfied with that, so I broke the key steps out into separate modules, each implemented independently.
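To make the contrast concrete, here is a heavily condensed sketch of what such a monolithic flow covers — an illustration with toy data, not the actual base_pipline.py:

```python
# Condensed, illustrative sketch of the monolithic pipeline:
# load data -> preprocess -> train -> test, all in one script.
import numpy as np
from tensorflow import keras

# 1. "load" data: toy token-id sequences and binary labels stand in for real reviews
x = np.random.randint(1, 100, size=(200, 50))
y = np.random.randint(0, 2, size=(200,))

# 2. preprocess: in the real script this is tokenization + id mapping + padding
x_train, x_test = x[:160], x[160:]
y_train, y_test = y[:160], y[160:]

# 3. train: a tiny embedding + conv classifier, same shape as the model used later
model = keras.models.Sequential([
    keras.layers.Embedding(input_dim=100, output_dim=16, input_length=50),
    keras.layers.Conv1D(16, 3, activation='relu'),
    keras.layers.GlobalMaxPool1D(),
    keras.layers.Dense(1, activation='sigmoid'),
])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["accuracy"])
model.fit(x_train, y_train, epochs=1, verbose=0)

# 4. test
print(model.evaluate(x_test, y_test))
```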
I pulled pretraining out on its own rather than bundling it with the main model, mainly because a pretrained model needs to be trained separately and maintained as a whole. Here I only wrote the simplest word2vec, and the model part just calls the gensim package. Here is the complete class:
```python
import numpy as np
from gensim.models.word2vec import Word2Vec

from nlu_model.util.pkl_impl import save_pkl, load_pkl


class Word2vector(object):
    def __init__(self):
        self.word2idx_dic = {}
        self.embedding_weights = []

    def train(self, train_data,   # training data (list of token lists)
              N_DIM=300,          # word2vec dimensionality
              MIN_COUNT=5,        # minimum frequency for a word to enter the vocab
              w2v_EPOCH=15,       # number of w2v training epochs
              MAXLEN=50):         # maximum sentence length
        self.N_DIM = N_DIM
        self.MIN_COUNT = MIN_COUNT
        self.w2v_EPOCH = w2v_EPOCH
        self.MAXLEN = MAXLEN

        # Initialize model and build vocab (gensim 3.x API)
        imdb_w2v = Word2Vec(size=N_DIM, min_count=MIN_COUNT)
        imdb_w2v.build_vocab(train_data)
        # Train the model over train_reviews (this may take several minutes)
        imdb_w2v.train(train_data, total_examples=len(train_data), epochs=w2v_EPOCH)

        # word2vec post-processing: row 0 is reserved for padding,
        # the last row for out-of-vocabulary words
        n_symbols = len(imdb_w2v.wv.vocab.keys()) + 2
        embedding_weights = [[0 for i in range(N_DIM)] for i in range(n_symbols)]
        idx = 1
        word2idx_dic = {}
        for w in imdb_w2v.wv.vocab.keys():
            embedding_weights[idx] = imdb_w2v.wv[w]
            word2idx_dic[w] = idx
            idx = idx + 1

        # slot reserved for OOV words: the average of all trained vectors
        avg_weights = [0 for i in range(N_DIM)]
        for wd in word2idx_dic:
            avg_weights = [avg_weights[i] + embedding_weights[word2idx_dic[wd]][i]
                           for i in range(N_DIM)]
        avg_weights = [avg_weights[i] / len(word2idx_dic) for i in range(N_DIM)]
        embedding_weights[idx] = avg_weights
        word2idx_dic["<unk>"] = idx  # placeholder token name for the OOV slot

        # slot reserved for padding
        word2idx_dic["<pad>"] = 0    # placeholder token name for the pad slot

        self.word2idx_dic = word2idx_dic
        self.embedding_weights = embedding_weights

    def save(self, word2idx_dic_path,  # path for the word-to-id dict
             embedding_path,           # path for the embedding matrix
             model_conf_path):         # path for the model config
        # save the word-to-id dict
        save_pkl(word2idx_dic_path, self.word2idx_dic)
        # save the embedding matrix
        save_pkl(embedding_path, self.embedding_weights)
        # save the config
        save_pkl(model_conf_path, [self.N_DIM, self.MIN_COUNT, self.w2v_EPOCH, self.MAXLEN])

    def __load_default__(self):
        self.load("./data/ptm/shopping_reviews/w2v_word2idx2020100601.pkl",
                  "./data/ptm/shopping_reviews/w2v_model_metric_2020100601.pkl",
                  "./data/ptm/shopping_reviews/w2v_model_conf_2020100601.pkl")

    def load(self, word2idx_dic_path, embedding_path, model_conf_path):
        self.N_DIM, self.MIN_COUNT, self.w2v_EPOCH, self.MAXLEN = load_pkl(model_conf_path)
        self.embedding_weights = load_pkl(embedding_path)
        self.word2idx_dic = load_pkl(word2idx_dic_path)

    def word2idx(self, word):
        if len(self.word2idx_dic) == 0:
            self.__load_default__()
        if word in self.word2idx_dic:
            return self.word2idx_dic[word]
        # unknown words fall back to the OOV slot
        return len(self.word2idx_dic) - 1

    def sentence2idx(self, sentence):
        return [self.word2idx(w) for w in sentence]

    def batch2idx(self, source_data):
        return [self.sentence2idx(s) for s in source_data]

    def get_np_weights(self):
        return np.array(self.embedding_weights)
```
Most of this class is the data processing done during training; the rest is a set of operations built around the trained word2vec model. Let me highlight a few key points:
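The most important point is the index layout of the embedding matrix: row 0 is reserved for padding, rows 1..n hold the trained vectors, and the final row is an averaged vector that every out-of-vocabulary word falls back to. A toy illustration of that layout (illustrative values only, not the training code):

```python
import numpy as np

# rows: [pad, 手机, 好, 快, oov]
vocab = ["手机", "好", "快"]
n_dim = 4
embedding_weights = np.zeros((len(vocab) + 2, n_dim))
word2idx = {w: i + 1 for i, w in enumerate(vocab)}            # real words start at index 1
embedding_weights[1:-1] = np.random.rand(len(vocab), n_dim)   # stand-in for trained vectors
embedding_weights[-1] = embedding_weights[1:-1].mean(axis=0)  # OOV row = average of all vectors

def lookup(word):
    # unknown words fall through to the last (OOV) slot, as in Word2vector.word2idx
    return word2idx.get(word, len(embedding_weights) - 1)

print(lookup("手机"))  # 1
print(lookup("平板"))  # 4 -> points at the averaged OOV row
```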
Another building block is the pkl_impl utility imported above; here is how I wrote it:

```python
import pickle

def save_pkl(path, data):
    with open(path, 'wb') as output:
        pickle.dump(data, output)

def load_pkl(path):
    with open(path, 'rb') as pkl_file:
        return pickle.load(pkl_file)
```
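Usage is as simple as it looks (the path below is just for illustration and assumes the directory already exists):

```python
from nlu_model.util.pkl_impl import save_pkl, load_pkl

# round-trip any picklable object through disk
save_pkl("./data/tmp/word2idx_demo.pkl", {"手机": 1, "好": 2})
print(load_pkl("./data/tmp/word2idx_demo.pkl"))  # {'手机': 1, '好': 2}
```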
Of course, more pretrained models may follow, and you can adjust as needed. That's the benefit of modularization: updating a model later is like swapping out a part.
The classification side is split into two layers: a cls model class, plus each concrete model wrapped in a class of its own (later I plan to give these an abstract class to inherit from).
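A minimal sketch of what such an abstract base class could look like (illustrative only — this isn't in the project yet): every concrete model would expose the same surface.

```python
from abc import ABC, abstractmethod

class BaseClsModel(ABC):
    """Interface every concrete classification model would implement."""

    @abstractmethod
    def fit(self, x_train, y_train, x_test, y_test): ...

    @abstractmethod
    def evaluate(self, x_test, y_test): ...

    @abstractmethod
    def predict(self, sentences): ...

    @abstractmethod
    def save(self, path): ...

    @abstractmethod
    def load(self, path): ...
```

TextCNNSmall below already matches this surface, so inheriting would mostly be a formality.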
First, the concrete model class, textcnn_small — after all, this isn't the full-blown TextCNN:
```python
from tensorflow import keras


class TextCNNSmall():
    """A slimmed-down TextCNN: one conv layer instead of multi-size kernels."""
    def __init__(self, model_conf, train_conf={"batch_size": 64, "epochs": 3, "verbose": 1}):
        self.model_conf = model_conf
        self.train_conf = train_conf
        self.__build_structure__()

    def __build_structure__(self):
        inputs = keras.layers.Input(shape=(self.model_conf["MAX_LEN"],))
        # embedding layer initialized from the pretrained word2vec weights
        embedding_layer = keras.layers.Embedding(
            output_dim=self.model_conf["w2c_len"],
            input_dim=len(self.model_conf["emb_model"].embedding_weights),
            weights=[self.model_conf["emb_model"].get_np_weights()],
            input_length=self.model_conf["MAX_LEN"],
            trainable=True)
        x = embedding_layer(inputs)
        # single convolution + pooling block
        l_conv1 = keras.layers.Conv1D(filters=self.model_conf["w2c_len"],
                                      kernel_size=3, activation='relu')(x)
        l_pool1 = keras.layers.MaxPool1D(pool_size=3)(l_conv1)
        l_pool11 = keras.layers.Flatten()(l_pool1)
        out = keras.layers.Dropout(0.5)(l_pool11)
        output = keras.layers.Dense(32, activation='relu')(out)
        pred = keras.layers.Dense(units=1, activation='sigmoid')(output)
        self.model = keras.models.Model(inputs=inputs, outputs=pred)
        self.model.summary()
        self.model.compile(loss="binary_crossentropy", optimizer="adam",
                           metrics=['accuracy'])

    def fit(self, x_train, y_train, x_test, y_test):
        history = self.model.fit(x_train, y_train,
                                 batch_size=self.train_conf.get("batch_size", 64),
                                 epochs=self.train_conf.get("epochs", 3),
                                 validation_data=(x_test, y_test),
                                 verbose=self.train_conf.get("verbose", 1))
        return history

    def evaluate(self, x_test, y_test):
        return self.model.evaluate(x_test, y_test)

    def predict(self, sentences):
        return self.model.predict(sentences)

    def save(self, path):
        if self.model:
            self.model.save(path)

    def load(self, path):
        # keras.load_model does not exist; keras.models.load_model is the right call
        self.model = keras.models.load_model(path)
```
From a research perspective the model is still the core, but in practice I also did a lot of work beyond the modeling itself:
On top of the model classes, I built another class to manage and maintain models of the same type.
```python
import jieba
from tensorflow import keras

from nlu_model.cls.model.textcnn_small import TextCNNSmall


class ClsModel(object):
    def __init__(self, model_choice, model_conf={}, train_conf={}):
        self.model_choice = model_choice
        self.model_conf = model_conf
        self.train_conf = train_conf
        self.__model_select__()

    def __model_select__(self):
        # unified entry point for choosing a concrete model or loading a saved one
        if self.model_choice == "textcnn_small":
            self.model = TextCNNSmall(self.model_conf, self.train_conf)
        if self.model_choice == "load":
            self.load(self.model_conf["path"])

    def preprocess(self, sentences):
        # tokenize, map to ids, then pad to fixed length
        sentences = [list(jieba.cut(i)) for i in sentences]
        sentence_id = self.model_conf["emb_model"].batch2idx(sentences)
        return keras.preprocessing.sequence.pad_sequences(
            sentence_id, value=0, padding='post', maxlen=50)

    def fit(self, x_train, y_train, x_test, y_test):
        return self.model.fit(x_train, y_train, x_test, y_test)

    def evaluate(self, x_test, y_test):
        return self.model.evaluate(x_test, y_test)

    def pred(self, sentence):
        return self.model.predict(sentence)

    def predict(self, sentences):
        sentence_id = self.preprocess(sentences)
        return self.pred([sentence_id])[0][0]

    def save(self, path):
        self.model.save(path)

    def load(self, path):
        self.model = keras.models.load_model(path)
```
Again, a few highlights.
The __model_select__ method handles unified maintenance and selection of models. Two modes are currently supported: textcnn_small, which builds a concrete model, and load, which restores a saved model and wraps it. The double underscore __ is meant to distinguish private from public members, i.e., whether outside code can access them; anything the outside world doesn't need to read should ideally be made __. My naming here still needs polish, as the demo below shows.
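To be precise about what __ actually does (this is standard Python behavior, not code from this project, and the class here is purely illustrative): only a leading double underscore triggers name mangling; names with both leading and trailing double underscores, like __model_select__, are not mangled at all.

```python
class Demo:
    def _internal(self):          # single underscore: convention only
        return "still directly accessible"

    def __hidden(self):           # leading double underscore: mangled to _Demo__hidden
        return "mangled"

    def __looks_dunder__(self):   # leading AND trailing __: no mangling
        return "not mangled"

d = Demo()
print(d._internal())              # works
print(d.__looks_dunder__())       # works
print(d._Demo__hidden())          # works, via the mangled name
# print(d.__hidden())             # AttributeError: mangling only applies inside the class
```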
With these modules in place, it's time to string them together. For data I used an e-commerce review sentiment classification dataset found online (https://blog.csdn.net/churximi/article/details/61210129).
First, the preprocessing part:
```python
# data preprocessing
import re

import jieba
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split


def loadfile():
    # load and preprocess the raw data
    neg = pd.read_excel('./data/cls/shopping_reviews/neg.xls', header=None)
    pos = pd.read_excel('./data/cls/shopping_reviews/pos.xls', header=None)

    def cw(x):
        # strip punctuation, then tokenize with jieba
        punctuation = r"[\s+\.\!\/_,$%^*(+\"\']+|[+——!,。?、~@#¥%……&*():]+"
        x = re.sub(punctuation, "", x)
        return list(jieba.cut(x))

    pos['words'] = pos[0].apply(cw)
    neg['words'] = neg[0].apply(cw)
    # positives labeled 1, negatives labeled 0
    y = np.concatenate((np.ones(len(pos)), np.zeros(len(neg))))
    x_train, x_test, y_train, y_test = train_test_split(
        np.concatenate((pos['words'], neg['words'])), y, test_size=0.2)
    return x_train, x_test, y_train, y_test


x_train, x_test, y_train, y_test = loadfile()
with open("./data/cls/shopping_reviews/train.txt", "w") as f:
    for idx in range(len(x_train)):
        f.write("%s\t%s\n" % (y_train[idx], " ".join(x_train[idx])))
with open("./data/cls/shopping_reviews/test.txt", "w") as f:
    for idx in range(len(x_test)):
        f.write("%s\t%s\n" % (y_test[idx], " ".join(x_test[idx])))
```
The source files are Excel spreadsheets; I stripped punctuation, tokenized, then split the data into train and test sets and saved each split, one `label<TAB>space-joined-tokens` line per sample.
```python
word2vector = Word2vector()
word2vector.train(x_train)
word2vector.save("./data/ptm/shopping_reviews/w2v_word2idx2020100601.pkl",
                 "./data/ptm/shopping_reviews/w2v_model_metric_2020100601.pkl",
                 "./data/ptm/shopping_reviews/w2v_model_conf_2020100601.pkl")
```
Then comes the Word2vector training. As you can see, this code is now very concise and convenient, which makes it pleasant to reuse later — exactly why I like wrapping things up this way.
The modeling stage has a few detailed steps: converting the train/test data into the structure the model expects, building and initializing the model, training and evaluation, and finally testing individual cases with both the freshly trained model and one reloaded from disk. Now the code:
```python
# convert train/test data into the structure the model expects
x_train = word2vector.batch2idx(x_train)
x_test = word2vector.batch2idx(x_test)
x_train = keras.preprocessing.sequence.pad_sequences(x_train, value=0, padding='post', maxlen=50)
x_test = keras.preprocessing.sequence.pad_sequences(x_test, value=0, padding='post', maxlen=50)

# build and initialize the model
model_conf = {"MAX_LEN": 50, "w2c_len": 300, "emb_model": word2vector}
train_conf = {"batch_size": 64, "epochs": 5, "verbose": 1}
cls_model = ClsModel("textcnn_small", model_conf, train_conf)

# train and evaluate
cls_model.fit(x_train, y_train, x_test, y_test)
print(cls_model.evaluate(x_test, y_test))
cls_model.save("./data/cls/shopping_reviews/model_20201007")

# test individual cases, both with the freshly trained model
# and with a model reloaded from disk
sentence = "这台手机真性能还挺好的"
print(cls_model.predict([sentence]))

sentence = "这台手机真性能还挺好的"
word2vector = Word2vector()
cls_model = ClsModel("load",
                     model_conf={"path": "./data/cls/shopping_reviews/model_20201007",
                                 "emb_model": word2vector},
                     train_conf={})
print(cls_model.predict([sentence]))
```
This is only a first cut at the architecture; the details aren't complete yet and need further work.
The time isn't right yet, so it's not open-sourced. Stay tuned.
Being able to run experiments and write algorithms is one thing; being able to standardize and structure your own work and ship it is quite another. A real algorithm engineer needs to be well-rounded and understand engineering to actually deliver a project. And with this framework in place, trying out more models becomes much faster, haha.