TensorBoard is a visualization tool that can display the network graph, how tensor metrics change over time, the distributions of tensors, and more. It is especially useful during training: when we vary different parameters (for example the weights W, the biases B, the number of convolutional layers, the number of fully connected layers, and so on), TensorBoard gives us a very intuitive way to compare runs and choose parameters.
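For example, scalar curves and tensor distributions are recorded with summary ops. Below is a minimal TF 1.x sketch of my own (the loss and weights tensors are stand-ins, not taken from the program later in this article):

import tensorflow as tf

# Stand-in tensors; in a real model these would be your loss and a weight matrix.
loss = tf.constant(0.5)
weights = tf.truncated_normal([100], stddev=0.1)

tf.summary.scalar('loss', loss)           # track how a scalar changes over time
tf.summary.histogram('weights', weights)  # track the distribution of a tensor
merged = tf.summary.merge_all()           # combine all summary ops into one

with tf.Session() as sess:
    writer = tf.summary.FileWriter('./tensorboard/demo', sess.graph)
    s = sess.run(merged)
    writer.add_summary(s, global_step=0)  # write the summaries tagged with step 0
    writer.close()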
This series consists of four articles that progress from simple to advanced. Using the familiar MNIST dataset as the running example, we start with the simplest possible TensorBoard setup, then gradually add parameters and complexity, and finally use TensorBoard to tune hyperparameters visually.
Using TensorBoard can be broken down into four steps:
1. Build the computation graph you want to visualize.
2. Create a tf.summary.FileWriter pointing at a log directory.
3. Write the graph (and, later, any summaries) to the event file with writer.add_graph(sess.graph).
4. Launch TensorBoard with tensorboard --logdir=... and open it in a browser.
The code below walks through one implementation of these four steps:
mnist_board_1.py:
import os
import tensorflow as tf

LOGDIR = './mnist'

# load the MNIST data (TF 1.x contrib API); note the path resolves to './mnistdata'
mnist = tf.contrib.learn.datasets.mnist.read_data_sets(train_dir=LOGDIR + 'data', one_hot=True)


def conv_layer(input, size_in, size_out):
    # 5x5 convolution + ReLU, followed by 2x2 max pooling
    w = tf.Variable(tf.truncated_normal([5, 5, size_in, size_out], stddev=0.1))
    b = tf.Variable(tf.constant(0.1, shape=[size_out]))
    conv = tf.nn.conv2d(input, w, strides=[1, 1, 1, 1], padding='SAME')
    act = tf.nn.relu(conv + b)

    return tf.nn.max_pool(act, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')


def fc_layer(input, size_in, size_out):
    # fully connected layer with ReLU activation
    w = tf.Variable(tf.truncated_normal([size_in, size_out], stddev=0.1))
    b = tf.Variable(tf.constant(0.1, shape=[size_out]))
    act = tf.nn.relu(tf.matmul(input, w) + b)

    return act


def mnist_model(learning_rate, use_two_conv, use_two_fc, hparam):
    tf.reset_default_graph()
    sess = tf.Session()

    # set up placeholders, and reshape the data
    x = tf.placeholder(tf.float32, shape=[None, 784])
    x_image = tf.reshape(x, [-1, 28, 28, 1])

    y = tf.placeholder(tf.float32, shape=[None, 10])

    if use_two_conv:
        conv1 = conv_layer(x_image, 1, 32)
        conv_out = conv_layer(conv1, 32, 64)
    else:
        conv1 = conv_layer(x_image, 1, 64)
        conv_out = tf.nn.max_pool(conv1, ksize=[1, 2, 2, 1], strides=[1, 2, 2, 1], padding='SAME')

    flattened = tf.reshape(conv_out, [-1, 7 * 7 * 64])

    if use_two_fc:
        fc1 = fc_layer(flattened, 7 * 7 * 64, 1024)
        embedding_input = fc1
        embedding_size = 1024
        logits = fc_layer(fc1, 1024, 10)
    else:
        embedding_input = flattened
        embedding_size = 7 * 7 * 64
        logits = fc_layer(flattened, 7 * 7 * 64, 10)

    # cross-entropy loss, trained with Adam
    xent = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=y))
    train_step = tf.train.AdamOptimizer(learning_rate).minimize(xent)

    correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(y, 1))
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))

    # embedding variable; assigned but not yet used for visualization in this first example
    embedding = tf.Variable(tf.zeros([1024, embedding_size]))
    assignment = embedding.assign(embedding_input)

    sess.run(tf.global_variables_initializer())
    # directory the event files are written to
    tenboard_dir = './tensorboard/test1/'

    # create a FileWriter for this run's event file
    writer = tf.summary.FileWriter(tenboard_dir + hparam)
    # add the graph to the event file
    writer.add_graph(sess.graph)

    for i in range(2001):
        batch = mnist.train.next_batch(100)
        sess.run(train_step, feed_dict={x: batch[0], y: batch[1]})


def make_hparam_string(learning_rate, use_two_fc, use_two_conv):
    conv_param = 'conv=2' if use_two_conv else 'conv=1'
    fc_param = 'fc=2' if use_two_fc else 'fc=1'
    return 'lr_%.0E,%s,%s' % (learning_rate, conv_param, fc_param)


def main():
    # You can try adding some more learning rates
    for learning_rate in [1E-4]:
        # Include 'False' as a value to try different model architectures.
        for use_two_fc in [True]:
            for use_two_conv in [True]:
                # Construct a hyperparameter string for each run (example: 'lr_1E-04,conv=2,fc=2')
                hparam = make_hparam_string(learning_rate, use_two_fc, use_two_conv)
                print('Starting run for %s' % hparam)

                # Actually run with the new settings
                mnist_model(learning_rate, use_two_fc, use_two_conv, hparam)


if __name__ == '__main__':
    main()
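Note that this first version writes only the graph itself; no scalar or histogram summaries are recorded, so only the GRAPHS tab will have content. As a sketch of where summaries would go (my own addition in the same TF 1.x style, not part of the article's program), the end of mnist_model could be extended like this:

# inside mnist_model(), after creating `writer` and calling writer.add_graph(sess.graph):
tf.summary.scalar('accuracy', accuracy)  # record accuracy as a scalar curve
summ = tf.summary.merge_all()

for i in range(2001):
    batch = mnist.train.next_batch(100)
    if i % 5 == 0:
        # evaluate and write the merged summaries every 5 steps
        s = sess.run(summ, feed_dict={x: batch[0], y: batch[1]})
        writer.add_summary(s, i)
    sess.run(train_step, feed_dict={x: batch[0], y: batch[1]})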
The program above applies two convolutions and two fully connected layers to the MNIST dataset. After it runs, event files are generated under ./tensorboard/test1/.
Open a Windows command prompt and change into the directory one level above test1 (take care to be in exactly this directory), then run: tensorboard --logdir=./test1
Open http://localhost:6006 in a browser (or the address the command prints) to view the generated graph.
You will see that the resulting graph is large and cluttered, and every variable is named Variable_XXX, which makes it hard to tell what anything means.
The next article shows how to improve on this by giving operations explicit names and defining scopes.
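As a quick preview of the idea (a minimal TF 1.x sketch; the scope and variable names here are only illustrative), readable node labels come from naming operations and grouping them with tf.name_scope:

import tensorflow as tf

# Group related ops under a named scope so the graph collapses them into one node.
with tf.name_scope('conv1'):
    w = tf.Variable(tf.truncated_normal([5, 5, 1, 32], stddev=0.1), name='W')
    b = tf.Variable(tf.constant(0.1, shape=[32]), name='B')
# In TensorBoard these now appear as conv1/W and conv1/B instead of Variable_XXX.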