import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, confusion_matrix
from keras.layers import Embedding, LSTM, GRU, Dropout, Dense, Input
from keras.models import Model, Sequential, load_model
from keras.preprocessing import sequence
from keras.datasets import imdb
import gensim
from gensim.models.word2vec import Word2Vec

'''
Take an LSTM as the example: the LSTM is unrolled for MAX_SEQ_LEN steps, and each cell
receives one word, represented as a one-hot vector.
The word-vector matrix is embedMatrix, which stores the vector of every word in the
vocabulary; a word's idx is its row number in embedMatrix.
Dotting "the word's one-hot vector" with "embedMatrix" yields "that word's vector".
For example: the vocabulary has 5 words plus a stop-word slot, i.e.
word2idx = {_stopWord: 0, love: 1, I: 2, you: 3, friend: 4, my: 5}; each word maps to
2 dimensions. Input sentence: "I love my pen"  # pen is a stop word, so its idx is set to 0

    [0, 0, 1, 0, 0, 0]     [ 0.0,  0.0]     [-0.4, -0.5]   # I
    [0, 1, 0, 0, 0, 0]     [ 0.3,  0.1]     [ 0.3,  0.1]   # love
    [0, 0, 0, 0, 0, 1]  ·  [-0.4, -0.5]  =  [-0.3, -0.8]   # my
    [1, 0, 0, 0, 0, 0]     [ 0.5,  0.2]     [ 0.0,  0.0]   # pen (stop word)
                           [-0.7,  0.6]
                           [-0.3, -0.8]
'''

MAX_SEQ_LEN = 250
inPath = '../data/'

def train_W2V(sentenList, embedSize=300, epoch_num=1):
    # hs=0 with negative=5 selects negative sampling rather than hierarchical softmax.
    # Note: iter and size are the gensim 3.x names; gensim >= 4.0 renames them
    # to epochs and vector_size.
    w2vModel = Word2Vec(sentences=sentenList, hs=0, negative=5, min_count=5,
                        window=5, iter=epoch_num, size=embedSize)
    w2vModel.save(inPath + 'w2vModel')
    return w2vModel
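To make the docstring's point concrete, here is a minimal standalone NumPy check, using the toy numbers from the example above, that the one-hot product is nothing more than a row lookup into embedMatrix:

import numpy as np

# Toy vocabulary and 2-dimensional vectors copied from the docstring example;
# these are illustrative numbers, not real trained embeddings.
word2idx = {'_stopWord': 0, 'love': 1, 'I': 2, 'you': 3, 'friend': 4, 'my': 5}
embedMatrix = np.array([[ 0.0,  0.0],    # row 0: stop words / padding
                        [ 0.3,  0.1],    # love
                        [-0.4, -0.5],    # I
                        [ 0.5,  0.2],    # you
                        [-0.7,  0.6],    # friend
                        [-0.3, -0.8]])   # my

idxSeq = [word2idx.get(w, 0) for w in ['I', 'love', 'my', 'pen']]  # pen -> 0
oneHot = np.eye(len(word2idx))[idxSeq]   # shape (4, 6): one one-hot row per word
print(oneHot @ embedMatrix)              # the product from the docstring diagram
assert np.allclose(oneHot @ embedMatrix, embedMatrix[idxSeq])  # same as row lookup

This is why frameworks never materialize the one-hot matrix: an Embedding layer just indexes into embedMatrix with the integer idx sequence.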
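Below is a sketch, not from the original post, of how train_W2V's output could be turned into word2idx/embedMatrix and wired into a frozen Keras Embedding layer. sentenList (a list of tokenized sentences), the 128-unit LSTM, and the sigmoid head for binary sentiment are assumptions; index2word is the gensim 3.x attribute name.

def build_embedMatrix(w2vModel):
    # idx 0 is reserved for padding / stop words, so every real word is offset by 1.
    word2idx = {w: i + 1 for i, w in enumerate(w2vModel.wv.index2word)}
    embedMatrix = np.zeros((len(word2idx) + 1, w2vModel.wv.vector_size))  # row 0 stays zero
    for w, i in word2idx.items():
        embedMatrix[i] = w2vModel.wv[w]
    return word2idx, embedMatrix

# sentenList is assumed to be a list of token lists, e.g. [['I', 'love', 'my', 'pen'], ...]
word2idx, embedMatrix = build_embedMatrix(train_W2V(sentenList))

model = Sequential()
model.add(Embedding(input_dim=embedMatrix.shape[0], output_dim=embedMatrix.shape[1],
                    weights=[embedMatrix], input_length=MAX_SEQ_LEN,
                    trainable=False))  # the one-hot · embedMatrix lookup from above
model.add(LSTM(128))
model.add(Dense(1, activation='sigmoid'))
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])

trainable=False keeps the pre-trained vectors fixed during training; the model is fed integer idx sequences padded to MAX_SEQ_LEN with sequence.pad_sequences, not one-hot matrices.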