
TensorFlow (3): Training on Your Own Data, Modular Version


1. model.py

import tensorflow as tf


def inference(images, batch_size, n_classes):
    # conv1: 3*3 convolution kernels, input depth 3, output 16 feature maps
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 3, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    # pool1 and norm1
    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME', name='pooling1')
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm1')

    # conv2: 3*3 kernels, 16 input channels, 16 output feature maps
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights',
                                  shape=[3, 3, 16, 16],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[16],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    # norm2 and pool2
    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0, beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 1, 1, 1], padding='SAME', name='pooling2')

    # local3: first fully connected layer
    with tf.variable_scope('local3') as scope:
        reshape = tf.reshape(pool2, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights',
                                  shape=[dim, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local3 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # local4: second fully connected layer
    with tf.variable_scope('local4') as scope:
        weights = tf.get_variable('weights',
                                  shape=[128, 128],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[128],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        local4 = tf.nn.relu(tf.matmul(local3, weights) + biases, name='local4')

    # softmax_linear: raw class scores; the softmax itself is applied inside the loss
    with tf.variable_scope('softmax_linear') as scope:
        weights = tf.get_variable('softmax_linear',
                                  shape=[128, n_classes],
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases',
                                 shape=[n_classes],
                                 dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        softmax_linear = tf.add(tf.matmul(local4, weights), biases, name='softmax_linear')
    return softmax_linear


def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss


def trainning(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op


def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy
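A quick way to sanity-check this graph is to build it on a dummy batch and inspect the output shape. This is a minimal smoke-test sketch, not part of the original post; it assumes TensorFlow 1.x and the 128x128x3 inputs used later in just_train.py:

import tensorflow as tf
import model

# Hypothetical smoke test: with batch_size=4 and n_classes=2, the logits
# coming out of inference() should have shape (4, 2).
batch_size, n_classes = 4, 2
images = tf.placeholder(tf.float32, shape=[batch_size, 128, 128, 3])
logits = model.inference(images, batch_size, n_classes)
print(logits.get_shape())  # expected: (4, 2), one raw score per class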

2. ReadMyOwnData.py

import numpy as np
import tensorflow as tf


def read_and_decode(filename):  # read a tfrecords file
    filename_queue = tf.train.string_input_producer([filename])  # build a filename queue
    reader = tf.TFRecordReader()
    _, serialized_example = reader.read(filename_queue)  # returns the file name and the serialized record
    features = tf.parse_single_example(
        serialized_example,
        features={
            'label': tf.FixedLenFeature([], tf.int64),
            'img_raw': tf.FixedLenFeature([], tf.string),
        }
    )  # pull the image data and the label out of the record
    image = tf.decode_raw(features['img_raw'], tf.uint8)
    image = tf.reshape(image, [128, 128, 3])
    image = tf.cast(image, tf.float32) * (1. / 255) - 0.5  # normalize to [-0.5, 0.5]
    label = tf.cast(features['label'], tf.int32)
    return image, label
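The post does not show how train.tfrecords was produced. For reference, here is a minimal writer-side sketch that matches the feature keys and types read_and_decode expects ('label' as int64, 'img_raw' as the raw uint8 bytes of a 128x128 RGB image). The function name, image_paths, and labels are hypothetical, and it assumes Pillow is available for image loading:

import numpy as np
import tensorflow as tf
from PIL import Image

def write_tfrecords(image_paths, labels, out_path):
    # image_paths/labels are hypothetical inputs: parallel lists of
    # image file paths and integer class ids (e.g. 0 = cat, 1 = dog).
    writer = tf.python_io.TFRecordWriter(out_path)
    for path, label in zip(image_paths, labels):
        img = Image.open(path).convert('RGB').resize((128, 128))
        img_raw = np.asarray(img, dtype=np.uint8).tobytes()  # matches tf.decode_raw(..., tf.uint8)
        example = tf.train.Example(features=tf.train.Features(feature={
            'label': tf.train.Feature(int64_list=tf.train.Int64List(value=[label])),
            'img_raw': tf.train.Feature(bytes_list=tf.train.BytesList(value=[img_raw])),
        }))
        writer.write(example.SerializeToString())
    writer.close()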

3. just_train.py

import tensorflow as tf
import numpy as np
import os

import ReadMyOwnData
import model

n_classes = 2          # cats and dogs
image_W = 128          # resize images; larger inputs make training slower
image_H = 128
batch_size = 4
CAPACITY = 256
MAX_STEP = 1000        # usually more than 10K in practice
learning_rate = 0.001  # usually less than 0.0001

train_dir = '/home/cae_b8/Documents/project2/data_set/train/'
logs_train_dir = '/home/cae_b8/Documents/project2/data_set/train/log/'
train_record_path = '/home/cae_b8/Documents/project2/data_set/train/train.tfrecords'
test_record_path = '/home/cae_b8/Documents/project2/data_set/train/test.tfrecords'

train_images, train_labels = ReadMyOwnData.read_and_decode(train_record_path)
# test_images, test_labels = ReadMyOwnData.read_and_decode(test_record_path)

train_batch, train_label_batch = tf.train.batch([train_images, train_labels],
                                                batch_size=batch_size, capacity=128)
# test_batch, test_label_batch = tf.train.batch([test_images, test_labels],
#                                               batch_size=1, capacity=128)

train_logits = model.inference(train_batch, batch_size, n_classes)
train_loss = model.losses(train_logits, train_label_batch)
train_op = model.trainning(train_loss, learning_rate)
train_acc = model.evaluation(train_logits, train_label_batch)

summary_op = tf.summary.merge_all()  # merge all summaries for logging

sess = tf.Session()
train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)
saver = tf.train.Saver()

sess.run(tf.global_variables_initializer())
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)

try:
    # Run MAX_STEP training steps, one batch per step
    for step in np.arange(MAX_STEP):
        if coord.should_stop():
            break
        # Note: train_logits does not need to be run explicitly here.
        # train_op depends on train_loss, which depends on train_logits,
        # so running train_op evaluates the inference graph implicitly.
        _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])

        # Every 50 steps, print the current loss and accuracy and write a summary
        if step % 50 == 0:
            print('Step %d, train loss = %.2f, train accuracy = %.2f%%' % (step, tra_loss, tra_acc * 100.0))
            summary_str = sess.run(summary_op)
            train_writer.add_summary(summary_str, step)

        # Every 200 steps (and at the final step), save a checkpoint
        if step % 200 == 0 or (step + 1) == MAX_STEP:
            checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
            saver.save(sess, checkpoint_path, global_step=step)
except tf.errors.OutOfRangeError:
    print('Done training -- epoch limit reached')
finally:
    coord.request_stop()
coord.join(threads)
sess.close()
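The checkpoints saved above can be restored later, for example to evaluate on the test set. This is a minimal restore sketch, assuming the same model.py and the logs_train_dir used above; the placeholder shapes mirror the training setup:

import tensorflow as tf
import model

logs_train_dir = '/home/cae_b8/Documents/project2/data_set/train/log/'

# Rebuild the same graph, then restore the latest checkpoint's weights into it.
images = tf.placeholder(tf.float32, shape=[4, 128, 128, 3])
logits = model.inference(images, 4, 2)

saver = tf.train.Saver()
with tf.Session() as sess:
    ckpt = tf.train.get_checkpoint_state(logs_train_dir)
    if ckpt and ckpt.model_checkpoint_path:
        saver.restore(sess, ckpt.model_checkpoint_path)
        print('Restored from %s' % ckpt.model_checkpoint_path)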
