Implementing AlexNet (TensorFlow version)


My fundamentals are not great, so this took me roughly five days, but AlexNet is finally implemented. First, a look at how the project is organized.

test and test2 hold the files generated while training the networks. input_data is the data-processing file. There are three model files: from top to bottom they are the original, simplest AlexNet structure, then a structure that imitates training on two GPUs, and finally a structure with the LRN layers removed. test.py takes a picture the network has never been trained on and runs it through the model, and the two training files train the networks defined by the two model files. I did not train the LRN-free network; if you are interested you can adapt training.py and train it yourself.

My code mainly follows this article: https://blog.csdn.net/qq_26499769/article/details/82928178

Before writing any code, it helps to lay out the plan:

  • The network structure: what the input and output of each stage are, and what the network should ultimately do
  • Choose a data set and preprocess it to match the network's input
  • Design the network structure
  • Train the network
  • Test the network
  • Summarize

Let's walk through these steps in order:

First we need to understand the AlexNet architecture:
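In brief, the layer sequence that model.py below implements looks like this (kernel sizes, strides, and channel counts are taken directly from that code; the 227x227x3 input size comes from training.py):

# AlexNet as implemented in model.py (input: 227x227x3 images)
# conv1: 11x11 conv, 96 filters, stride 4, ReLU -> max pool 3x3/2 -> LRN
# conv2: 5x5 conv, 256 filters, stride 1, ReLU  -> LRN -> max pool 3x3/2
# conv3: 3x3 conv, 384 filters, stride 1, ReLU
# conv4: 3x3 conv, 384 filters, stride 1, ReLU
# conv5: 3x3 conv, 256 filters, stride 1, ReLU  -> LRN -> max pool 3x3/2
# fc6:   fully connected, 4096 units, ReLU, dropout (keep_prob 0.5)
# fc7:   fully connected, 4096 units, ReLU, dropout (keep_prob 0.5)
# fc8:   fully connected, n_classes units (raw logits)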

Next, step two: I chose the cats-vs-dogs data set from Kaggle, which contains 12,500 cat images and 12,500 dog images.
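Since get_files() in input_data.py reads the class straight from the file names, it is worth checking the layout before training. A minimal sketch, assuming the images sit directly under train_dir with the usual Kaggle names (cat.0.jpg ... dog.12499.jpg); the path is just my local example:

import os

train_dir = 'H:/liangpan/train1/'   # adjust to wherever the Kaggle archive was unpacked
# input_data.get_files() expects names like 'cat.0.jpg' or 'dog.4321.jpg' and takes the
# class from the first token of the file name.
for fname in sorted(os.listdir(train_dir))[:3]:
    print(fname.split('.')[0], '->', train_dir + fname)   # prints 'cat' or 'dog' plus the full path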

Step three is to design our network structure based on the first two steps (see the layer summary above).

Then we write the code to train the network, and finally test it.
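Before diving into the full files, here is a condensed sketch of how the pieces are wired together; the constants mirror the ones used in training.py below, and the complete scripts follow:

import input_data
import model

# Condensed training pipeline: load file names, batch them, build the network,
# then attach the loss, optimizer, and accuracy ops.
images, labels = input_data.get_files('H:/liangpan/train1/')
image_batch, label_batch = input_data.get_batch(images, labels, 227, 227, 8, 200)
logits = model.inference(image_batch, batch_size=8, n_classes=2)
loss = model.losses(logits, label_batch)
train_op = model.training(loss, learning_rate=0.0001)
accuracy = model.evaluation(logits, label_batch)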

input_data.py

import tensorflow as tf
import numpy as np
import os

#img_width = 227
#img_height = 227
#train_dir = 'H:/liangpan/train1/'

def get_files(file_dir):
    cats = []
    label_cats = []
    dogs = []
    label_dogs = []
    for file in os.listdir(file_dir):  # os.listdir returns the names of the entries in the given directory
        # Store each image path in a list.
        # Cat images get label 0.
        name = file.split(sep='.')
        if name[0] == 'cat':
            cats.append(file_dir + file)
            label_cats.append(0)
        else:
            # Dog images get label 1.
            dogs.append(file_dir + file)
            label_dogs.append(1)
    print('There are %d cats\nThere are %d dogs' % (len(cats), len(dogs)))
    image_list = np.hstack((cats, dogs))
    label_list = np.hstack((label_cats, label_dogs))
    # Stack the images and labels into one temp array, shuffle it, then split it back out.
    temp = np.array([image_list, label_list])
    temp = temp.transpose()  # transpose: the two rows become two columns
    np.random.shuffle(temp)
    image_list = list(temp[:, 0])
    label_list = list(temp[:, 1])
    label_list = [int(i) for i in label_list]
    return image_list, label_list

#image_list, label_list = get_files(train_dir)

def get_batch(image, label, image_W, image_H, batch_size, capacity):
    image = tf.cast(image, tf.string)
    label = tf.cast(label, tf.int32)
    # Put the images and their labels into a queue.
    input_queue = tf.train.slice_input_producer([image, label])
    label = input_queue[1]
    image_contents = tf.read_file(input_queue[0])
    # Decode the image.
    image = tf.image.decode_jpeg(image_contents, channels=3)
    # Set the image size.
    image = tf.image.resize_image_with_crop_or_pad(image, image_W, image_H)
    # Standardize the image.
    image = tf.image.per_image_standardization(image)
    # Generate batches.
    image_batch, label_batch = tf.train.batch([image, label], batch_size=batch_size,
                                              num_threads=64, capacity=capacity)
    label_batch = tf.reshape(label_batch, [batch_size])
    return image_batch, label_batch

# Quick test of get_batch
"""
import matplotlib.pyplot as plt
BATCH_SIZE = 2
CAPACITY = 256
IMG_W = 208
IMG_H = 208
train_dir = 'H:/liangpan/train1/'
image_list, label_list = get_files(train_dir)
image_batch, label_batch = get_batch(image_list, label_list, IMG_W, IMG_H, BATCH_SIZE, CAPACITY)
with tf.Session() as sess:
    i = 0
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(coord=coord)
    try:
        while not coord.should_stop() and i < 1:
            img, label = sess.run([image_batch, label_batch])
            for j in np.arange(BATCH_SIZE):
                print('label:%d' % label[j])
                plt.imshow(img[j, :, :, :])
                plt.show()
            i += 1
    except tf.errors.OutOfRangeError:
        print('done!')
    finally:
        coord.request_stop()
        coord.join(threads)
"""

model.py

import tensorflow as tf

# Print the name and shape of a layer
def shape(value):
    print(value.op.name, value.get_shape().as_list())

def inference(images, batch_size, n_classes):
    # conv1
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights',
                                  shape=[11, 11, 3, 96],  # 96 randomly initialized 11*11*3 kernels
                                  dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        # tf.truncated_normal_initializer draws from a truncated normal distribution:
        # values follow a normal distribution with the given mean and standard deviation,
        # but any value more than 2 standard deviations from the mean is discarded and redrawn.
        # stddev: standard deviation of the normal distribution.
        biases = tf.get_variable("biases", shape=[96], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 4, 4, 1], padding='SAME')
        # strides: how far the filter moves each step; the first and fourth entries are
        # normally 1, the second is the horizontal stride and the third the vertical stride:
        # strides = [1, stride, stride, 1].
        # VALID: while sliding the filter over the input, leftover rows/columns that do not
        # fit a full step are dropped.
        # SAME: the input is padded with rows/columns of zeros around the border, so wherever
        # the filter runs past the edge the missing values are filled with zeros.
        pre_activation = tf.nn.bias_add(conv, biases)
        # tf.nn.bias_add adds the bias vector to every row of conv; the result has the same shape as conv.
        conv1 = tf.nn.relu(pre_activation, name=scope.name)
        # apply the ReLU activation

    with tf.variable_scope('pooling1_lrn') as scope:
        # conv1 is the feature map produced by the convolution, shaped [batch, height, width, channels]
        shape(conv1)
        # max-pooling layer
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling1')
        # local response normalization layer
        norm1 = tf.nn.lrn(pool1, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm1')
        shape(norm1)

    # conv2
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights', shape=[5, 5, 96, 256], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[256], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(norm1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling2')

    # conv3
    with tf.variable_scope('conv3') as scope:
        weights = tf.get_variable('weights', shape=[3, 3, 256, 384], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[384], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool2, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name='conv3')

    # conv4
    with tf.variable_scope('conv4') as scope:
        weights = tf.get_variable('weights', shape=[3, 3, 384, 384], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[384], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(conv3, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name='conv4')

    # conv5
    with tf.variable_scope('conv5') as scope:
        weights = tf.get_variable('weights', shape=[3, 3, 384, 256], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[256], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(conv4, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(pre_activation, name='conv5')

    with tf.variable_scope('pooling5_lrn') as scope:
        norm5 = tf.nn.lrn(conv5, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm5')
        pool5 = tf.nn.max_pool(norm5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling5')
        print("pool5.shape = ", pool5.shape)

    # fc6
    with tf.variable_scope('fc6') as scope:
        # Flatten pool5: collapse the last three dimensions into one, keeping the batch_size dimension
        reshape = tf.reshape(pool5, shape=[batch_size, -1])
        print("reshape.shape = ", reshape.shape)
        dim = reshape.get_shape()[1].value
        print("dim", dim)
        weights = tf.get_variable('weights', shape=[dim, 4096], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[4096], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc6 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # dropout6
    with tf.name_scope('dropout6') as scope:
        dropout6 = tf.nn.dropout(fc6, 0.5)  # keep_prob=0.5: roughly half the activations are dropped

    # fc7
    with tf.variable_scope('fc7') as scope:
        weights = tf.get_variable('weights', shape=[4096, 4096], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[4096], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc7 = tf.nn.relu(tf.matmul(dropout6, weights) + biases, name='fc7')

    # dropout7
    with tf.name_scope('dropout7') as scope:
        dropout7 = tf.nn.dropout(fc7, 0.5)

    # fc8
    with tf.variable_scope('fc8') as scope:
        weights = tf.get_variable('fc8', shape=[4096, n_classes], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[n_classes], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc8 = tf.add(tf.matmul(dropout7, weights), biases, name='fc8')
    return fc8

def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss

def training(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)  # counts global training steps
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op

# Evaluation
def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        # Compare the predictions with the true labels; returns a boolean tensor
        correct = tf.nn.in_top_k(logits, labels, 1)  # logits, true class labels, k (usually 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)  # log accuracy as a scalar summary
    return accuracy
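As a quick sanity check of the graph construction (not part of the original scripts), one can build inference() on a dummy batch and confirm the logits shape; this assumes model.py is importable as model:

import tensorflow as tf
import model

# Build the graph once on a placeholder batch and print the shape of the logits.
# No variables are trained here; this only checks that the layer shapes line up.
with tf.Graph().as_default():
    dummy_images = tf.placeholder(tf.float32, shape=[8, 227, 227, 3])
    logits = model.inference(dummy_images, batch_size=8, n_classes=2)
    print(logits.get_shape().as_list())   # expected: [8, 2]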

training.py

import os
import numpy as np
import tensorflow as tf
import input_data
import model

N_CLASSES = 2             # number of output classes
IMG_W = 227               # image size
IMG_H = 227
BATCH_SIZE = 8            # batch size
CAPACITY = 200
MAX_STEP = 15000          # number of training steps
learning_rate = 0.0001    # learning rate

def run_training():
    train_dir = 'H:/liangpan/train1/'                   # path of the training data set
    logs_train_dir = 'H:/liangpan/test_alexnet/test/'   # where checkpoints and summaries are written
    # Get the image paths and labels
    train, train_label = input_data.get_files(train_dir)
    # Generate batches
    train_batch, train_label_batch = input_data.get_batch(train,
                                                          train_label,
                                                          IMG_W,
                                                          IMG_H,
                                                          BATCH_SIZE,
                                                          CAPACITY)
    # Build the model
    train_logits = model.inference(train_batch, BATCH_SIZE, N_CLASSES)
    # Loss
    train_loss = model.losses(train_logits, train_label_batch)
    # Training op
    train_op = model.training(train_loss, learning_rate)
    # Accuracy
    train_acc = model.evaluation(train_logits, train_label_batch)
    # Merge all summaries
    summary_op = tf.summary.merge_all()

    sess = tf.Session()
    train_writer = tf.summary.FileWriter(logs_train_dir, sess.graph)  # write the summaries
    saver = tf.train.Saver()
    sess.run(tf.global_variables_initializer())  # initialize all variables

    # The Coordinator manages the threads started in the session: it can stop all worker
    # threads together and reports exceptions to the thread waiting for them to finish,
    # which then shuts down all threads.
    coord = tf.train.Coordinator()
    threads = tf.train.start_queue_runners(sess=sess, coord=coord)
    try:
        for step in np.arange(MAX_STEP):
            # coord.should_stop() returns True once the data is exhausted and coord.request_stop() was called
            if coord.should_stop():
                break
            _, tra_loss, tra_acc = sess.run([train_op, train_loss, train_acc])
            if step % 100 == 0:
                print('step %d,train loss=%.2f,train accuracy=%.2f' % (step, tra_loss, tra_acc))
                summary_str = sess.run(summary_op)
                train_writer.add_summary(summary_str, step)
            if step % 2000 == 0 or (step + 1) == MAX_STEP:
                # Save the model every 2000 steps into checkpoint_path
                checkpoint_path = os.path.join(logs_train_dir, 'model.ckpt')
                saver.save(sess, checkpoint_path, global_step=step)
    except tf.errors.OutOfRangeError:
        print('Done training -- epoch limit reached')
    finally:
        coord.request_stop()
    coord.join(threads)
    sess.close()

run_training()
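The scalar summaries written by the FileWriter can be inspected while training runs; assuming TensorBoard is installed, point it at the log directory used above:

tensorboard --logdir=H:/liangpan/test_alexnet/test/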

test.py evaluates the model trained by training.py on a single image that was never used in training, in the same way test_2_gpu.py below does for the two-GPU model.


test_2_gpu.py evaluates the model trained by training_2_gpu.py:

from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import input_data
import tensorflow as tf
import model_2_gpu

# Get a single image
def get_one_image(train):
    n = len(train)
    ind = np.random.randint(0, n)  # pick a random image from the data set
    img_dir = train[ind]           # path of the image
    print(img_dir)
    image = Image.open(img_dir)    # open the image
    plt.imshow(image)
    image = image.resize([227, 227])  # resize the image to 227*227
    image = np.array(image)           # convert the image to an array
    plt.show()
    return image

def evaluate_one_image():
    train_dir = 'H:/liangpan/train/'
    train, train_label = input_data.get_files(train_dir)
    image_array = get_one_image(train)
    # Graph.as_default() is a context manager that overrides the default graph inside the block
    with tf.Graph().as_default():
        BATCH_SIZE = 1   # only one image is read, so the batch size is 1
        N_CLASSES = 2
        # Convert the image format
        image = tf.cast(image_array, tf.float32)
        image = tf.reshape(image, [1, 227, 227, 3])
        # the image was a 3-D array [227, 227, 3]; reshape it into a 4-D tensor
        logit = model_2_gpu.inference(image, BATCH_SIZE, N_CLASSES)
        # inference() returns raw logits (no activation), so apply softmax here
        logit = tf.nn.softmax(logit)
        # A placeholder for feeding data into the model in the classic way
        # (note: logit is built from the constant tensor `image` above, so this placeholder
        # does not actually take part in the computation)
        x = tf.placeholder(tf.float32, shape=[227, 227, 3])
        logs_train_dir = 'H:/liangpan/test_alexnet/test2/'
        saver = tf.train.Saver()
        with tf.Session() as sess:
            print("reading checkpoints")
            # Restore the model into the session
            ckpt = tf.train.get_checkpoint_state(logs_train_dir)
            if ckpt and ckpt.model_checkpoint_path:
                global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]
                saver.restore(sess, ckpt.model_checkpoint_path)
                print('loading success,global_step is %s' % global_step)
            else:
                print('no checkpoint file found')
            # Run the image through the model
            prediction = sess.run(logit, feed_dict={x: image_array})
            # Index of the highest probability in the output
            max_index = np.argmax(prediction)
            if max_index == 0:
                print("this is a cat with possibility %.6f" % prediction[:, 0])
            else:
                print("this is a dog with possibility %.6f" % prediction[:, 1])

evaluate_one_image()
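A side note on the placeholder above: since logit is built from the constant tensor created out of image_array, the feed_dict has no effect on the result (the script still works because the image is baked into the graph). A minimal alternative sketch, assuming the same model_2_gpu.inference and checkpoint directory, that actually routes the image through a placeholder:

import numpy as np
import tensorflow as tf
import model_2_gpu

# Hypothetical variant of evaluate_one_image(): build the graph on a placeholder so the
# same restored graph can score any image passed in through feed_dict.
with tf.Graph().as_default():
    x = tf.placeholder(tf.float32, shape=[1, 227, 227, 3])
    logit = tf.nn.softmax(model_2_gpu.inference(x, batch_size=1, n_classes=2))
    saver = tf.train.Saver()
    with tf.Session() as sess:
        ckpt = tf.train.get_checkpoint_state('H:/liangpan/test_alexnet/test2/')
        if ckpt and ckpt.model_checkpoint_path:
            saver.restore(sess, ckpt.model_checkpoint_path)
        image_array = np.zeros((227, 227, 3), np.float32)   # replace with get_one_image(train)
        prediction = sess.run(logit, feed_dict={x: image_array.reshape(1, 227, 227, 3)})
        print(prediction)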

model_2_gpu.py

import tensorflow as tf

# Print the name and shape of a layer
def shape(value):
    print(value.op.name, value.get_shape().as_list())

def inference(images, batch_size, n_classes):
    # conv1
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights', shape=[11, 11, 3, 96], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable("biases", shape=[96], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 4, 4, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope('pooling1_lrn') as scope:
        norm1 = tf.nn.lrn(conv1, depth_radius=2, bias=2.0, alpha=1e-4,
                          beta=0.75, name='norm1')
        pool1 = tf.nn.max_pool(norm1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling1')

    # conv2
    with tf.variable_scope('conv2') as scope:
        # Grouped convolution: split the input and the weights along the channel axis,
        # convolve each group separately, then merge the results with concat()
        pool1_groups = tf.split(axis=3, value=pool1, num_or_size_splits=2)
        weights = tf.get_variable('weights', shape=[5, 5, 48, 256], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        weights_groups = tf.split(axis=3, value=weights, num_or_size_splits=2)
        conv_up = tf.nn.conv2d(pool1_groups[0], weights_groups[0], [1, 1, 1, 1], padding='SAME')
        conv_down = tf.nn.conv2d(pool1_groups[1], weights_groups[1], [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[256], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up, bias_down])
        conv2 = tf.nn.relu(bias, name=scope.name)

    with tf.variable_scope('pooling2_lrn') as scope:
        norm2 = tf.nn.lrn(conv2, depth_radius=2, bias=2.0, alpha=1e-4,
                          beta=0.75, name='norm2')
        pool2 = tf.nn.max_pool(norm2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling2')

    # conv3
    with tf.variable_scope('conv3') as scope:
        weights = tf.get_variable('weights', shape=[3, 3, 256, 384], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[384], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool2, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name='conv3')

    # conv4
    with tf.variable_scope('conv4') as scope:
        conv3_groups = tf.split(axis=3, value=conv3, num_or_size_splits=2)
        weights = tf.get_variable('weights', shape=[3, 3, 192, 384], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        weights_groups = tf.split(axis=3, value=weights, num_or_size_splits=2)
        conv_up = tf.nn.conv2d(conv3_groups[0], weights_groups[0], [1, 1, 1, 1], padding='SAME')
        conv_down = tf.nn.conv2d(conv3_groups[1], weights_groups[1], [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[384], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up, bias_down])
        conv4 = tf.nn.relu(bias, name=scope.name)

    # conv5
    with tf.variable_scope('conv5') as scope:
        conv4_groups = tf.split(axis=3, value=conv4, num_or_size_splits=2)
        weights = tf.get_variable('weights', shape=[3, 3, 192, 256], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        weights_groups = tf.split(axis=3, value=weights, num_or_size_splits=2)
        conv_up = tf.nn.conv2d(conv4_groups[0], weights_groups[0], [1, 1, 1, 1], padding='SAME')
        conv_down = tf.nn.conv2d(conv4_groups[1], weights_groups[1], [1, 1, 1, 1], padding='SAME')
        biases = tf.get_variable('biases', shape=[256], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        biases_groups = tf.split(axis=0, value=biases, num_or_size_splits=2)
        bias_up = tf.nn.bias_add(conv_up, biases_groups[0])
        bias_down = tf.nn.bias_add(conv_down, biases_groups[1])
        bias = tf.concat(axis=3, values=[bias_up, bias_down])
        conv5 = tf.nn.relu(bias, name=scope.name)

    with tf.variable_scope('pooling5_lrn') as scope:
        norm5 = tf.nn.lrn(conv5, depth_radius=2, bias=2.0, alpha=1e-4,
                          beta=0.75, name='norm5')
        pool5 = tf.nn.max_pool(norm5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='VALID', name='pooling5')

    # fc6
    with tf.variable_scope('fc6') as scope:
        reshape = tf.reshape(pool5, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights', shape=[dim, 4096], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[4096], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc6 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # dropout6
    with tf.name_scope('dropout6') as scope:
        dropout6 = tf.nn.dropout(fc6, 0.5)

    # fc7
    with tf.variable_scope('fc7') as scope:
        weights = tf.get_variable('weights', shape=[4096, 4096], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[4096], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc7 = tf.nn.relu(tf.matmul(dropout6, weights) + biases, name='fc7')

    # dropout7
    with tf.name_scope('dropout7') as scope:
        dropout7 = tf.nn.dropout(fc7, 0.5)

    # fc8
    with tf.variable_scope('fc8') as scope:
        weights = tf.get_variable('fc8', shape=[4096, n_classes], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[n_classes], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc8 = tf.add(tf.matmul(dropout7, weights), biases, name='fc8')
    return fc8

def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss

def training(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op

def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy

model_no_lrn.py

import tensorflow as tf

def inference(images, batch_size, n_classes):
    # conv1
    with tf.variable_scope('conv1') as scope:
        weights = tf.get_variable('weights', shape=[11, 11, 3, 96], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable("biases", shape=[96], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(images, weights, strides=[1, 4, 4, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv1 = tf.nn.relu(pre_activation, name=scope.name)

    with tf.variable_scope('pooling1_lrn') as scope:
        pool1 = tf.nn.max_pool(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling1')

    # conv2
    with tf.variable_scope('conv2') as scope:
        weights = tf.get_variable('weights', shape=[5, 5, 96, 256], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[256], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool1, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv2 = tf.nn.relu(pre_activation, name='conv2')

    with tf.variable_scope('pooling2_lrn') as scope:
        pool2 = tf.nn.max_pool(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling2')

    # conv3
    with tf.variable_scope('conv3') as scope:
        weights = tf.get_variable('weights', shape=[3, 3, 256, 384], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[384], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(pool2, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv3 = tf.nn.relu(pre_activation, name='conv3')

    # conv4
    with tf.variable_scope('conv4') as scope:
        weights = tf.get_variable('weights', shape=[3, 3, 384, 384], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[384], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(conv3, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv4 = tf.nn.relu(pre_activation, name='conv4')

    # conv5
    with tf.variable_scope('conv5') as scope:
        weights = tf.get_variable('weights', shape=[3, 3, 384, 256], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.1, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[256], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        conv = tf.nn.conv2d(conv4, weights, strides=[1, 1, 1, 1], padding='SAME')
        pre_activation = tf.nn.bias_add(conv, biases)
        conv5 = tf.nn.relu(pre_activation, name='conv5')

    with tf.variable_scope('pooling5_lrn') as scope:
        # note: this variant still applies one LRN before the last pooling layer
        norm5 = tf.nn.lrn(conv5, depth_radius=4, bias=1.0, alpha=0.001 / 9.0,
                          beta=0.75, name='norm5')
        pool5 = tf.nn.max_pool(norm5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1],
                               padding='SAME', name='pooling5')

    # fc6
    with tf.variable_scope('fc6') as scope:
        reshape = tf.reshape(pool5, shape=[batch_size, -1])
        dim = reshape.get_shape()[1].value
        weights = tf.get_variable('weights', shape=[dim, 4096], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[4096], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc6 = tf.nn.relu(tf.matmul(reshape, weights) + biases, name=scope.name)

    # dropout6
    with tf.name_scope('dropout6') as scope:
        dropout6 = tf.nn.dropout(fc6, 0.5)

    # fc7
    with tf.variable_scope('fc7') as scope:
        weights = tf.get_variable('weights', shape=[4096, 4096], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[4096], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc7 = tf.nn.relu(tf.matmul(dropout6, weights) + biases, name='fc7')

    # dropout7
    with tf.name_scope('dropout7') as scope:
        dropout7 = tf.nn.dropout(fc7, 0.5)

    # fc8
    with tf.variable_scope('fc8') as scope:
        weights = tf.get_variable('fc8', shape=[4096, n_classes], dtype=tf.float32,
                                  initializer=tf.truncated_normal_initializer(stddev=0.005, dtype=tf.float32))
        biases = tf.get_variable('biases', shape=[n_classes], dtype=tf.float32,
                                 initializer=tf.constant_initializer(0.1))
        fc8 = tf.add(tf.matmul(dropout7, weights), biases, name='fc8')
    return fc8

def losses(logits, labels):
    with tf.variable_scope('loss') as scope:
        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(
            logits=logits, labels=labels, name='xentropy_per_example')
        loss = tf.reduce_mean(cross_entropy, name='loss')
        tf.summary.scalar(scope.name + '/loss', loss)
    return loss

def training(loss, learning_rate):
    with tf.name_scope('optimizer'):
        optimizer = tf.train.AdadeltaOptimizer(learning_rate=learning_rate)
        global_step = tf.Variable(0, name='global_step', trainable=False)
        train_op = optimizer.minimize(loss, global_step=global_step)
    return train_op

def evaluation(logits, labels):
    with tf.variable_scope('accuracy') as scope:
        correct = tf.nn.in_top_k(logits, labels, 1)
        correct = tf.cast(correct, tf.float16)
        accuracy = tf.reduce_mean(correct)
        tf.summary.scalar(scope.name + '/accuracy', accuracy)
    return accuracy

Summary

With my weak fundamentals I stumbled along for quite a while and went through many versions before it finally started to look like something. The code looks simple, but writing it yourself and digging into every detail shows it is not that simple, so my hands-on skills still need work.

 
