The complete training script (TensorFlow 1.x):

```python
# coding=utf-8
import random
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

# Dataset: read MNIST with one-hot encoded labels
mnist = input_data.read_data_sets("MNIST_DATA/", one_hot=True)


def train_size(num):
    print('Total Training Images in Dataset = ' + str(mnist.train.images.shape))  # dataset shape
    print('--------------------------------------------------')
    x_train = mnist.train.images[:num, :]  # inputs
    print('x_train Examples Loaded = ' + str(x_train.shape))  # input shape
    y_train = mnist.train.labels[:num, :]  # labels
    print('y_train Examples Loaded = ' + str(y_train.shape))  # label shape
    print('')
    return x_train, y_train


def display_digit(num):
    """
    Display one digit.
    :param num: index into the dataset
    :return:
    """
    print(y_train[num])
    label = y_train[num].argmax(axis=0)
    image = x_train[num].reshape([28, 28])
    plt.title('Example: %d Label: %d' % (num, label))
    plt.imshow(image, cmap=plt.get_cmap('gray_r'))
    plt.show()


def display_mult_flat(start, stop):
    """
    Display the pixel distribution of a run of flattened images.
    :param start: first index
    :param stop: last index (exclusive)
    :return:
    """
    images = x_train[start].reshape([1, 784])  # shape 1x784
    for i in range(start + 1, stop):
        images = np.concatenate((images, x_train[i].reshape([1, 784])))  # stack rows
    plt.imshow(images, cmap=plt.get_cmap('gray_r'))  # inverted grayscale: black digits on white
    plt.show()


# mnist.train holds 55,000 of MNIST's 60,000 training images (5,000 go to validation)
x_train, y_train = train_size(55000)
display_digit(random.randint(0, x_train.shape[0] - 1))  # show one random image; valid indices are 0..54999 inclusive
display_mult_flat(0, 400)  # pixel distribution of images 0-399


# Model definition
def conv2d(x, W, b, strides=1):
    """
    Convolutional layer with ReLU activation.
    :param x: input
    :param W: weights
    :param b: bias
    :param strides: stride
    :return:
    """
    x = tf.nn.conv2d(x, W, strides=[1, strides, strides, 1], padding='SAME')  # 'SAME' zero-pads the borders
    x = tf.nn.bias_add(x, b)  # add bias b
    return tf.nn.relu(x)  # ReLU activation


def maxpool2d(x, k=2):
    """
    Max-pooling layer.
    :param x: input
    :param k: window and stride size
    :return:
    """
    return tf.nn.max_pool(x, ksize=[1, k, k, 1], strides=[1, k, k, 1], padding='SAME')  # ksize window, strides stride


def conv_net(x, weights, biases, dropout):
    """
    Convolutional neural network:
    2 conv layers, 1 fully connected layer, 1 dropout layer, 1 output layer.
    :param x: input
    :param weights: weights
    :param biases: biases
    :param dropout: dropout keep probability
    :return:
    """
    x = tf.reshape(x, shape=[-1, 28, 28, 1])  # reshape flat input back to images
    conv1 = conv2d(x, weights['wc1'], biases['bc1'])  # first conv layer
    conv1 = maxpool2d(conv1, k=2)  # max pooling for downsampling
    conv2 = conv2d(conv1, weights['wc2'], biases['bc2'])  # second conv layer
    conv2 = maxpool2d(conv2, k=2)  # max pooling for downsampling
    fc1 = tf.reshape(conv2, [-1, weights['wd1'].get_shape().as_list()[0]])  # flatten conv2 output to match the FC input
    fc1 = tf.add(tf.matmul(fc1, weights['wd1']), biases['bd1'])  # fully connected layer
    fc1 = tf.nn.relu(fc1)  # ReLU activation
    fc1 = tf.nn.dropout(fc1, dropout)  # dropout layer
    out = tf.add(tf.matmul(fc1, weights['out']), biases['out'])  # output layer
    return out


learning_rate = 0.001  # learning rate
epochs = 500  # number of training steps
batch_size = 128  # examples per batch
display_step = 10  # logging interval
n_input = 784  # input pixels, 28*28
n_classes = 10  # label classes 0-9
dropout = 0.85  # keep probability; dropout mitigates overfitting

x = tf.placeholder(tf.float32, [None, n_input])  # input placeholder
y = tf.placeholder(tf.float32, [None, n_classes])  # label placeholder
keep_prob = tf.placeholder(tf.float32)  # dropout placeholder

weights = {  # weights, drawn from a normal distribution
    'wc1': tf.Variable(tf.random_normal([5, 5, 1, 32])),  # 5x5 conv, 1 input channel, 32 outputs
    'wc2': tf.Variable(tf.random_normal([5, 5, 32, 64])),  # 5x5 conv, 32 inputs, 64 outputs
    'wd1': tf.Variable(tf.random_normal([7 * 7 * 64, 1024])),  # fully connected, 7x7x64 inputs, 1024 outputs
    'out': tf.Variable(tf.random_normal([1024, n_classes]))  # 1024 inputs, 10 outputs
}
biases = {  # biases, drawn from a normal distribution
    'bc1': tf.Variable(tf.random_normal([32])),
    'bc2': tf.Variable(tf.random_normal([64])),
    'bd1': tf.Variable(tf.random_normal([1024])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

pred = conv_net(x, weights, biases, keep_prob)  # prediction model: the CNN
loss = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=pred, labels=y))  # softmax cross-entropy loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(loss)  # Adam optimizer
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))  # per-example correctness
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))  # cast booleans to floats, then average

# Training
init = tf.global_variables_initializer()
train_loss = []  # training loss
train_acc = []  # training accuracy
test_acc = []  # test accuracy
with tf.Session() as sess:
    sess.run(init)
    step = 1
    while step <= epochs:
        batch_x, batch_y = mnist.train.next_batch(batch_size)  # read one batch
        sess.run(optimizer, feed_dict={x: batch_x, y: batch_y, keep_prob: dropout})
        if step % display_step == 0:
            loss_train, acc_train = sess.run([loss, accuracy],
                                             feed_dict={x: batch_x, y: batch_y, keep_prob: 1.})
            print("Iter " + str(step) + ", Minibatch Loss= " + "{:.2f}".format(loss_train) +
                  ", Training Accuracy= " + "{:.2f}".format(acc_train))
            # Evaluation on the test set
            acc_test = sess.run(accuracy,
                                feed_dict={x: mnist.test.images, y: mnist.test.labels, keep_prob: 1.})
            print("Testing Accuracy:" + "{:.2f}".format(acc_test))
            train_loss.append(loss_train)
            train_acc.append(acc_train)
            test_acc.append(acc_test)
        step += 1

eval_indices = range(0, epochs, display_step)  # evaluation steps: 0, 10, ..., 490
plt.plot(eval_indices, train_loss, 'k-')  # k = black, - = solid line
plt.title('Softmax Loss per iteration')
plt.xlabel('Iteration')
plt.ylabel('Softmax Loss')
plt.show()

# Plot train and test accuracy
plt.plot(eval_indices, train_acc, 'k-', label='Train Set Accuracy')  # train accuracy: black solid line
plt.plot(eval_indices, test_acc, 'r--', label='Test Set Accuracy')  # test accuracy: red dashed line
plt.title('Train and Test Accuracy')
plt.xlabel('Generation')
plt.ylabel('Accuracy')
plt.legend(loc='lower right')  # legend in lower-right corner
plt.show()
```
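One detail worth spelling out is the `7 * 7 * 64` fan-in of `'wd1'`: each 2x2 max-pool with `'SAME'` padding halves the spatial size (output = ceil(input / stride)), so the 28x28 input becomes 14x14 after the first pool and 7x7 after the second, with 64 channels coming out of `conv2`. A quick standalone check of that arithmetic (a throwaway sketch, not part of the script above):

```python
import math

size = 28                       # MNIST images are 28x28
for pool in range(2):           # two 2x2 'SAME' max-pools with stride 2
    size = math.ceil(size / 2)  # 'SAME' padding: output = ceil(input / stride)
print(size * size * 64)         # -> 3136 = 7*7*64, the fan-in of 'wd1'
```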
To train on the CPU only, add this at the top of the script:

```python
import os
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
```
Sample output:

```
Total Training Images in Dataset = (55000, 784)
--------------------------------------------------
x_train Examples Loaded = (55000, 784)
y_train Examples Loaded = (55000, 10)

[0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]
```
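The printed vector is the one-hot label of the randomly chosen example: position 8 is hot, so the image shows the digit 8. Decoding it is just an argmax (a throwaway check, not part of the script):

```python
import numpy as np

label_vec = np.array([0., 0., 0., 0., 0., 0., 0., 0., 1., 0.])
print(np.argmax(label_vec))  # -> 8: the index of the hot entry is the digit class
```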
Training log (printed every `display_step = 10` steps):

```
Iter 10, Minibatch Loss= 23467.91, Training Accuracy= 0.21 Testing Accuracy:0.21
Iter 20, Minibatch Loss= 12199.31, Training Accuracy= 0.47 Testing Accuracy:0.47
Iter 30, Minibatch Loss= 8052.56, Training Accuracy= 0.61 Testing Accuracy:0.61
Iter 40, Minibatch Loss= 5332.36, Training Accuracy= 0.70 Testing Accuracy:0.70
Iter 50, Minibatch Loss= 3125.04, Training Accuracy= 0.80 Testing Accuracy:0.80
Iter 60, Minibatch Loss= 3726.54, Training Accuracy= 0.82 Testing Accuracy:0.82
Iter 70, Minibatch Loss= 2440.20, Training Accuracy= 0.85 Testing Accuracy:0.85
Iter 80, Minibatch Loss= 3303.02, Training Accuracy= 0.84 Testing Accuracy:0.84
Iter 90, Minibatch Loss= 1026.95, Training Accuracy= 0.93 Testing Accuracy:0.93
Iter 100, Minibatch Loss= 1335.14, Training Accuracy= 0.91 Testing Accuracy:0.91
Iter 110, Minibatch Loss= 2079.04, Training Accuracy= 0.85 Testing Accuracy:0.85
Iter 120, Minibatch Loss= 1131.18, Training Accuracy= 0.93 Testing Accuracy:0.93
Iter 130, Minibatch Loss= 1776.76, Training Accuracy= 0.85 Testing Accuracy:0.85
Iter 140, Minibatch Loss= 1723.12, Training Accuracy= 0.88 Testing Accuracy:0.88
Iter 150, Minibatch Loss= 773.24, Training Accuracy= 0.94 Testing Accuracy:0.94
Iter 160, Minibatch Loss= 1853.41, Training Accuracy= 0.88 Testing Accuracy:0.88
Iter 170, Minibatch Loss= 1460.82, Training Accuracy= 0.91 Testing Accuracy:0.91
Iter 180, Minibatch Loss= 983.55, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 190, Minibatch Loss= 1121.68, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 200, Minibatch Loss= 959.64, Training Accuracy= 0.94 Testing Accuracy:0.94
Iter 210, Minibatch Loss= 1255.96, Training Accuracy= 0.91 Testing Accuracy:0.91
Iter 220, Minibatch Loss= 1072.11, Training Accuracy= 0.94 Testing Accuracy:0.94
Iter 230, Minibatch Loss= 1571.01, Training Accuracy= 0.90 Testing Accuracy:0.90
Iter 240, Minibatch Loss= 1157.00, Training Accuracy= 0.90 Testing Accuracy:0.90
Iter 250, Minibatch Loss= 638.41, Training Accuracy= 0.94 Testing Accuracy:0.94
Iter 260, Minibatch Loss= 1743.98, Training Accuracy= 0.93 Testing Accuracy:0.93
Iter 270, Minibatch Loss= 1393.67, Training Accuracy= 0.93 Testing Accuracy:0.93
Iter 280, Minibatch Loss= 1372.25, Training Accuracy= 0.94 Testing Accuracy:0.94
Iter 290, Minibatch Loss= 628.57, Training Accuracy= 0.94 Testing Accuracy:0.94
Iter 300, Minibatch Loss= 388.52, Training Accuracy= 0.96 Testing Accuracy:0.96
Iter 310, Minibatch Loss= 949.66, Training Accuracy= 0.93 Testing Accuracy:0.93
Iter 320, Minibatch Loss= 878.50, Training Accuracy= 0.94 Testing Accuracy:0.94
Iter 330, Minibatch Loss= 867.22, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 340, Minibatch Loss= 423.94, Training Accuracy= 0.96 Testing Accuracy:0.96
Iter 350, Minibatch Loss= 1151.92, Training Accuracy= 0.92 Testing Accuracy:0.92
Iter 360, Minibatch Loss= 754.75, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 370, Minibatch Loss= 1832.36, Training Accuracy= 0.89 Testing Accuracy:0.89
Iter 380, Minibatch Loss= 1195.78, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 390, Minibatch Loss= 1790.18, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 400, Minibatch Loss= 242.66, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 410, Minibatch Loss= 785.68, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 420, Minibatch Loss= 747.41, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 430, Minibatch Loss= 1230.40, Training Accuracy= 0.88 Testing Accuracy:0.88
Iter 440, Minibatch Loss= 685.98, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 450, Minibatch Loss= 1284.93, Training Accuracy= 0.92 Testing Accuracy:0.92
Iter 460, Minibatch Loss= 373.52, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 470, Minibatch Loss= 1473.19, Training Accuracy= 0.95 Testing Accuracy:0.95
Iter 480, Minibatch Loss= 96.51, Training Accuracy= 0.98 Testing Accuracy:0.98
Iter 490, Minibatch Loss= 549.43, Training Accuracy= 0.97 Testing Accuracy:0.97
Iter 500, Minibatch Loss= 414.19, Training Accuracy= 0.95 Testing Accuracy:0.95
```