赞
踩
作者:禅与计算机程序设计艺术
# Train a tiny Word2Vec model on three toy tokenized sentences and print
# the learned 100-dim embedding for the word "sentence".
sentences = [
    ['this', 'is', 'the', 'first', 'sentence'],
    ['this', 'is', 'the', 'second', 'sentence'],
    ['this', 'is', 'the', 'third', 'sentence'],
]
# NOTE(review): gensim >= 4.0 renamed the `size` keyword to `vector_size`;
# the original `size=100` only works on gensim 3.x.
model = gensim.models.Word2Vec(
    sentences, vector_size=100, window=5, min_count=1, workers=4
)
print(model.wv['sentence'])
# Build a 2-layer stacked-LSTM regressor in TensorFlow 1.x graph mode.
input_size = 10
output_size = 5
hidden_size = 20
num_layers = 2
batch_size = 32
time_steps = 20

# The original used the misspelled names `tf.nn.rnncell` and `zerostate`;
# the real TF 1.x API is `tf.nn.rnn_cell` / `zero_state`.
cells = [tf.nn.rnn_cell.BasicLSTMCell(hidden_size) for _ in range(num_layers)]
stacked_cells = tf.nn.rnn_cell.MultiRNNCell(cells)

inputs = tf.placeholder(tf.float32, shape=(None, time_steps, input_size))
# Renamed from `outputs`: the original reused that name for both the target
# placeholder and the RNN output, so the loss compared a tensor with itself
# (constant zero) and nothing could train.
targets = tf.placeholder(tf.float32, shape=(None, output_size))
initial_state = stacked_cells.zero_state(batch_size, tf.float32)

# `tf.nn.dynamicrnn` / `initialstate=` do not exist; fixed to `dynamic_rnn`
# with the `initial_state` keyword.
rnn_outputs, final_state = tf.nn.dynamic_rnn(
    stacked_cells, inputs, initial_state=initial_state
)

# Project the last time step down to `output_size` before the loss; the raw
# LSTM output has `hidden_size` units, which would not match the targets.
predictions = tf.layers.dense(rnn_outputs[:, -1, :], units=output_size)

loss = tf.reduce_mean(tf.square(predictions - targets))
train_op = tf.train.AdamOptimizer().minimize(loss)
# Build a toy additive-attention regressor in TensorFlow 1.x graph mode.
input_size = 10
output_size = 5
context_size = 20
batch_size = 32
time_steps = 20

# The original referenced `timesteps`, which was never defined (it was
# declared as `time_steps`).
inputs = tf.placeholder(tf.float32, shape=(None, time_steps, input_size))
# Renamed from `outputs` — see note on `targets` below; the original's loss
# compared the prediction tensor with itself (`outputs - outputs` == 0).
targets = tf.placeholder(tf.float32, shape=(None, output_size))
context = tf.placeholder(tf.float32, shape=(None, context_size))

# Attention parameters — the original used W, V and b without ever defining
# them.  NOTE(review): these shapes assume the score is computed per batch
# row over the flattened inputs; confirm the intended attention geometry
# against the surrounding article.
W = tf.get_variable('attn_W', shape=(time_steps * input_size, context_size))
V = tf.get_variable('attn_V', shape=(context_size, context_size))
b = tf.get_variable('attn_b', shape=(context_size,))

# Flatten (batch, T, input_size) -> (batch, T*input_size) so the matmul
# with the rank-2 weight matrix is well-defined.
flat_inputs = tf.reshape(inputs, (-1, time_steps * input_size))

attention_weights = tf.nn.softmax(
    tf.nn.tanh(tf.matmul(flat_inputs, W) + tf.matmul(context, V) + b)
)
# `tf.reducesum` does not exist; the real API is `tf.reduce_sum`.
# keepdims=True keeps a (batch, 1) column so the concat below is valid.
context_vector = tf.reduce_sum(
    tf.multiply(attention_weights, context), axis=1, keepdims=True
)

predictions = tf.layers.dense(
    tf.concat([flat_inputs, context_vector], axis=-1), units=output_size
)

loss = tf.reduce_mean(tf.square(predictions - targets))
train_op = tf.train.AdamOptimizer().minimize(loss)
# Build a per-step (causal) attention decoder in TensorFlow 1.x graph mode.
input_size = 10
output_size = 5
embedding_size = 20
num_heads = 2  # declared in the original but never used; kept for parity
batch_size = 32
time_steps = 20

inputs = tf.placeholder(tf.float32, shape=(None, time_steps, input_size))
# One target vector per time step, to match the stacked per-step outputs.
targets = tf.placeholder(tf.float32, shape=(None, time_steps, output_size))
# `tf.getvariable` does not exist; the real API is `tf.get_variable`.
embedding_matrix = tf.get_variable(
    'embedding_matrix', shape=(input_size, embedding_size)
)

# Encode each step: (batch, T, input_size) . (input_size, emb) -> (batch, T, emb).
# `tf.matmul` on a rank-3/rank-2 pair is invalid in TF 1.x; tensordot is the
# correct contraction.
inputs_encoded = tf.nn.tanh(
    tf.tensordot(inputs, embedding_matrix, axes=[[2], [0]])
)

# The original used `attentionweights` without defining it; here the weights
# are a learned (batch, T, 1) score over the encoded steps.
attention_logits = tf.layers.dense(inputs_encoded, units=1)
attention_weights = tf.nn.softmax(attention_logits, axis=1)

# NOTE(review): the original also indexed `embeddingmatrix[outputs[:, i]]`,
# which is invalid (indexing a variable with a float placeholder) and was
# dropped; confirm whether teacher forcing with integer target ids was the
# article's intent.
outputs_decoded = []
for i in range(time_steps):
    # Causal attention: attend only over encoder steps 0..i.
    attended = tf.reduce_sum(
        tf.multiply(inputs_encoded[:, :i + 1], attention_weights[:, :i + 1]),
        axis=1,
    )
    outputs_decoded.append(tf.layers.dense(attended, units=output_size))
outputs_decoded = tf.stack(outputs_decoded, axis=1)

predictions = tf.layers.dense(
    tf.concat([inputs, outputs_decoded], axis=-1), units=output_size
)

# The original's loss was `outputs - outputs` (constant zero); compare the
# prediction against the target placeholder instead.
loss = tf.reduce_mean(tf.square(predictions - targets))
train_op = tf.train.AdamOptimizer().minimize(loss)
Copyright © 2003-2013 www.wpsshop.cn 版权所有,并保留所有权利。