After installing TensorFlow, you also need to install the matching Keras version.
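To confirm what is actually installed, you can print the versions from Python. This is a minimal sketch, assuming TensorFlow 2.x, where a Keras build ships inside TensorFlow as tf.keras:

import tensorflow as tf

# Print the TensorFlow version and the Keras version bundled with it.
print('TensorFlow:', tf.__version__)
print('Keras (tf.keras):', tf.keras.__version__)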
The Session is what actually executes the operations.
import tensorflow as tf

# TF 1.x-style graph code; under TensorFlow 2.x it needs the compat.v1 API
# and eager execution disabled.
tf.compat.v1.disable_eager_execution()

matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
# print(matrix1)
# print(matrix2)
# Matrix multiplication, the graph counterpart of np.dot(m1, m2).
product = tf.matmul(matrix1, matrix2)

# Method 1: tf.Session() was removed in TF 2.x, so use tf.compat.v1.Session().
sess = tf.compat.v1.Session()
# Each run executes the graph and returns a result.
result = sess.run(product)
print(result)
sess.close()

# Method 2: the with-block closes the session automatically.
with tf.compat.v1.Session() as sess:
    result2 = sess.run(product)
    print(result2)
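For comparison, under TensorFlow 2.x eager execution (the default, i.e. without the disable_eager_execution() call above) the same product can be computed without any Session. A minimal sketch, not part of the original graph example:

import tensorflow as tf

# Eager mode runs the operation immediately, so no Session is needed.
matrix1 = tf.constant([[3, 3]])
matrix2 = tf.constant([[2], [2]])
product = tf.matmul(matrix1, matrix2)
print(product.numpy())  # [[12]]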
import tensorflow as tf

# TF 1.x-style graph code; needs the compat.v1 API under TensorFlow 2.x.
tf.compat.v1.disable_eager_execution()

# A counter built from a Variable: each step adds 1 and writes the result back.
state = tf.Variable(0, name='counter')
print(state.name)
one = tf.constant(1)
new_value = tf.add(state, one)
# Load new_value into state, so state becomes new_value after each update.
update = tf.compat.v1.assign(state, new_value)

# Variables must be initialized before they are used.
init = tf.compat.v1.global_variables_initializer()
with tf.compat.v1.Session() as sess:
    # run(init) must come first to initialize the variables.
    sess.run(init)
    for _ in range(3):
        sess.run(update)
        # Print the variable through sess.run() to see its current value.
        print(sess.run(state))
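The eager-mode counterpart of this counter (a minimal sketch, assuming eager execution has not been disabled) uses Variable.assign_add instead of a Session:

import tensorflow as tf

# assign_add updates the variable in place; no graph or Session is involved.
state = tf.Variable(0, name='counter')
one = tf.constant(1)
for _ in range(3):
    state.assign_add(one)
    print(state.numpy())  # prints 1, 2, 3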
import tensorflow as tf

# TF 1.x-style graph code; needs the compat.v1 API under TensorFlow 2.x.
tf.compat.v1.disable_eager_execution()

# A placeholder is a value that is only fed in when the session runs.
input1 = tf.compat.v1.placeholder(tf.float32)
input2 = tf.compat.v1.placeholder(tf.float32)
output = tf.multiply(input1, input2)

with tf.compat.v1.Session() as sess:
    # The actual values are supplied through the feed_dict dictionary.
    print(sess.run(output, feed_dict={input1: [7.], input2: [2.]}))
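In TF 2.x the placeholder/feed_dict pattern is usually replaced by ordinary function arguments. A minimal sketch (the function name multiply_fn is just for illustration):

import tensorflow as tf

# tf.function traces the Python function into a graph on first call;
# the arguments play the role the placeholders played above.
@tf.function
def multiply_fn(a, b):
    return tf.multiply(a, b)

print(multiply_fn(tf.constant([7.]), tf.constant([2.])).numpy())  # [14.]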
When using a multi-layer neural network, choose the activation function carefully, because a poor choice can lead to vanishing or exploding gradients.
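A small numerical illustration of the vanishing-gradient problem (a minimal sketch, assuming eager execution; the depth of 10 and the input value 0.5 are arbitrary choices):

import tensorflow as tf

# Chain ten sigmoid layers and look at the gradient with respect to the input.
# Each sigmoid derivative is at most 0.25, so the chained gradient shrinks fast.
x = tf.constant(0.5)
with tf.GradientTape() as tape:
    tape.watch(x)
    y = x
    for _ in range(10):
        y = tf.sigmoid(y)
print(tape.gradient(y, x).numpy())  # a tiny number, on the order of 1e-6 or smaller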
五、 Building a neural network with TensorFlow
import numpy as np
import tensorflow as tf

# TF 1.x-style graph code; needs the compat.v1 API under TensorFlow 2.x.
tf.compat.v1.disable_eager_execution()

def add_layer(inputs, in_size, out_size, activation_function=None):
    # One fully connected layer: Wx + b, optionally followed by an activation.
    Weights = tf.Variable(tf.compat.v1.random_normal([in_size, out_size]))
    biases = tf.Variable(tf.zeros([1, out_size]) + 0.1)
    Wx_plus_b = tf.matmul(inputs, Weights) + biases
    if activation_function is None:
        outputs = Wx_plus_b
    else:
        outputs = activation_function(Wx_plus_b)
    return outputs

# Training data: y = x^2 - 0.5 plus a little Gaussian noise.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis]
noise = np.random.normal(0, 0.05, x_data.shape)
y_data = np.square(x_data) - 0.5 + noise

xs = tf.compat.v1.placeholder(tf.float32, [None, 1])
ys = tf.compat.v1.placeholder(tf.float32, [None, 1])

# One hidden layer with 10 ReLU units, then a linear output layer.
l1 = add_layer(xs, 1, 10, activation_function=tf.nn.relu)
prediction = add_layer(l1, 10, 1, activation_function=None)

# Mean squared error loss.
loss = tf.reduce_mean(tf.reduce_sum(tf.square(ys - prediction), axis=[1]))
# Gradient descent with a step size (learning rate) of 0.1.
train_step = tf.compat.v1.train.GradientDescentOptimizer(0.1).minimize(loss)

# Variables must be initialized before training.
init = tf.compat.v1.global_variables_initializer()
sess = tf.compat.v1.Session()
sess.run(init)

for i in range(1000):
    sess.run(train_step, feed_dict={xs: x_data, ys: y_data})
    if i % 100 == 0:
        print(sess.run(loss, feed_dict={xs: x_data, ys: y_data}))
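The same regression network can also be written with tf.keras, which is the recommended style in TensorFlow 2.x. This is a minimal sketch: layer sizes, learning rate, and epoch count mirror the graph version above, but the default mini-batching differs from the full-batch training loop:

import numpy as np
import tensorflow as tf

# y = x^2 - 0.5 plus noise, same data as above.
x_data = np.linspace(-1, 1, 300)[:, np.newaxis].astype(np.float32)
noise = np.random.normal(0, 0.05, x_data.shape).astype(np.float32)
y_data = np.square(x_data) - 0.5 + noise

# One hidden ReLU layer with 10 units and a linear output layer, trained with MSE.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(1,)),
    tf.keras.layers.Dense(10, activation='relu'),
    tf.keras.layers.Dense(1),
])
model.compile(optimizer=tf.keras.optimizers.SGD(learning_rate=0.1), loss='mse')
model.fit(x_data, y_data, epochs=1000, verbose=0)
print(model.evaluate(x_data, y_data, verbose=0))  # final training MSE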