# model.py -- network definition, loss, optimizer and evaluation ops
import tensorflow as tf


def inference(images, batch_size, n_classes):
    # conv1: 3x3 kernels, input depth 3 (RGB), 16 output feature maps
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # pool1 and norm1
    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

    # conv2: 3x3 kernels, 16 input channels, 16 output feature maps
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 16, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # pool2 and norm2
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME', name='pooling2')

    # local3: first fully connected layer, 128 units
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # local4: second fully connected layer, 128 units
    with tf.variable_scope('local4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[128, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # softmax_linear: linear layer producing unnormalized logits for n_classes
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('softmax_linear',
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')

    return softmax_linear


def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        # labels are integer class ids (not one-hot); the op applies softmax internally
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


def trainning(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
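Before wiring the network into the input pipeline, it is worth checking that the graph builds and that the logits come out with shape [batch_size, n_classes]. The snippet below is a minimal smoke test, not part of the original post: with 128x128 inputs only pool1 downsamples (stride 2), so local3 flattens a 64x64x16 tensor (65536 values).

# smoke test (illustrative, assumes model.py is importable as "model")
import numpy as np
import tensorflow as tf
import model

if __name__ == '__main__':
    batch_size, n_classes = 4, 2
    # random float batch with the same layout the training pipeline produces
    images = tf.constant(np.random.rand(batch_size, 128, 128, 3).astype(np.float32))
    logits = model.inference(images, batch_size, n_classes)
    print(logits.get_shape())  # expected: (4, 2)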
# ReadMyOwnData.py -- read images and labels back from a TFRecords file
import numpy as np
import tensorflow as tf


def read_and_decode(filename):  # read the TFRecords file
    filename_queue = tf.train.string_input_producer([filename])  # build a filename queue

    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)  # returns the filename and the serialized example
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        }
    )  # pull the image data and the label out of the example

    image = tf.decode_raw(features['img_raw'], tf.uint8)
    image = tf.reshape(image, [128, 128, 3])
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5  # normalize to [-0.5, 0.5]
    label = tf.cast(features['label'], tf.int32)

    return image, label
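read_and_decode expects each example to carry an int64 'label' and a bytes 'img_raw' holding the raw uint8 pixels of a 128x128 RGB image. The original post does not show the writer side; the sketch below is one way such a file could be produced under those assumptions (the function name write_record and the use of PIL are illustrative, not from the post).

# hypothetical writer matching the format parsed by read_and_decode
import tensorflow as tf
from PIL import Image

def write_record(image_paths, labels, record_path):
    writer = tf.python_io.TFRecordWriter(record_path)
    for path, label in zip(image_paths, labels):
        # force RGB and the 128x128 size that read_and_decode reshapes to
        img = Image.open(path).convert('RGB').resize((128, 128))
        img_raw = img.tobytes()  # raw uint8 bytes, later decoded with tf.decode_raw
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[int(label)])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
        }))
        writer.write(example.SerializeToString())
    writer.close()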
# train.py -- build the graph, start the input queues and run the training loop
import tensorflow as tf
import numpy as np
import os
import ReadMyOwnData
import model


n_classes = 2          # cats and dogs
image_W = 128          # resize the images; larger images make training take longer
image_H = 128
batch_size = 4
CAPACITY = 256
MAX_STEP = 1000        # usually more than 10K in practice
learning_rate = 0.001  # usually 0.0001 or smaller in practice

train_dir = '/home/cae_b8/Documents/project2/data_set/train/'
logs_train_dir = '/home/cae_b8/Documents/project2/data_set/train/log/'

train_record_path = '/home/cae_b8/Documents/project2/data_set/train/train.tfrecords'
test_record_path = '/home/cae_b8/Documents/project2/data_set/train/test.tfrecords'
train_images, train_labels = ReadMyOwnData.read_and_decode(train_record_path)
# test_images, test_labels = ReadMyOwnData.read_and_decode(test_record_path)
train_batch, train_label_batch = tf.train.batch([train_images, train_labels],
                                                batch_size=batch_size, capacity=128)
# test_batch, test_label_batch = tf.train.batch([test_images, test_labels],
#                                               batch_size=1, capacity=128)


train_logits = model.inference(train_batch, batch_size, n_classes)
train_loss = model.losses(train_logits, train_label_batch)
train_op = model.trainning(train_loss, learning_rate)
train_acc = model.evaluation(train_logits, train_label_batch)
summary_op = tf.summary.merge_all()  # merge all summaries for the log writer

sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()
sess.run(tf.global_variables_initializer())

coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

try:
    # run MAX_STEP training steps, one batch per step
    for step in np.arange(MAX_STEP):
        if coord.should_stop():
            break
        # train_logits does not need to be fetched explicitly here: train_loss and
        # train_acc depend on it, so it is evaluated as part of running those ops
        _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
        # every 50 steps, print the current loss and accuracy and write the summaries
        if step % 50 == 0:
            print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
            summary_str = sess.run(summary_op)
            train_writer.add_summary(summary_str, step)
        # every 200 steps (and at the last step), save a checkpoint
        if step % 200 == 0 or (step + 1) == MAX_STEP:
            checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)

except tf.errors.OutOfRangeError:
    print('Done training -- epoch limit reached')
finally:
    coord.request_stop()
coord.join(threads)  # wait for the queue-runner threads to shut down
sess.close()
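Once checkpoints have been written to logs_train_dir, they can be restored to classify a single image with the same inference graph. The post does not include this step; the sketch below is illustrative (the name evaluate_one_image and the use of PIL are assumptions), and it applies the same /255 - 0.5 normalization as read_and_decode.

# hypothetical single-image evaluation using the saved checkpoint
import numpy as np
import tensorflow as tf
from PIL import Image
import model

def evaluate_one_image(image_path, logs_train_dir, n_classes=2):
    with tf.Graph().as_default():
        img = np.array(Image.open(image_path).convert('RGB').resize((128, 128)), dtype=np.float32)
        img = img * (1. / 255) - 0.5  # same normalization as the training pipeline
        image = tf.constant(img.reshape([1, 128, 128, 3]))
        logits = model.inference(image, 1, n_classes)
        prediction = tf.nn.softmax(logits)
        saver = tf.train.Saver()
        with tf.Session() as sess:
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                saver.restore(sess, ckpt.model_checkpoint_path)
                probs = sess.run(prediction)
                print('class probabilities: %s' % probs[0])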