[Advanced TF] MNIST Handwritten Digit Recognition: Complete Code & a Single Neural Network

Example 21: Recognizing blurry handwritten digits in images

1. Download the dataset

from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)

print('Training images:', mnist.train.images)
print('Training images shape:', mnist.train.images.shape)

import pylab
im = mnist.train.images[1]
im = im.reshape(-1, 28)   # reshape the flat 784-vector back to 28x28 for display
pylab.imshow(im)
pylab.show()

print('Test images shape:', mnist.test.images.shape)
print('Validation images shape:', mnist.validation.images.shape)
Training images: [[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]]
Training images shape: (55000, 784)

Test images shape: (10000, 784)
Validation images shape: (5000, 784)
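With one_hot=True, each label is a 10-element vector holding a single 1 at the class index. A quick inspection sketch (assuming the dataset loaded above; numpy is pulled in only for argmax):

import numpy as np

label = mnist.train.labels[1]            # one-hot row for the second training image
print(label)                             # a vector like [0. 0. 0. 1. 0. ...]; the exact row depends on the image
print('class index:', np.argmax(label))  # the position of the 1 is the digit class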

2. Analyze the image characteristics

import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
import pylab

tf.reset_default_graph()
# Define placeholders
x = tf.placeholder(tf.float32, [None, 784])  # MNIST images are 28x28 = 784 pixels
y = tf.placeholder(tf.float32, [None, 10])   # digits 0-9, 10 classes
Extracting MNIST_data/train-images-idx3-ubyte.gz
Extracting MNIST_data/train-labels-idx1-ubyte.gz
Extracting MNIST_data/t10k-images-idx3-ubyte.gz
Extracting MNIST_data/t10k-labels-idx1-ubyte.gz

3. Build the model

# Initialize model weights
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))
# Softmax classifier
pred = tf.nn.softmax(tf.matmul(x, W) + b)
# Cross-entropy loss
cost = tf.reduce_mean(-tf.reduce_sum(y * tf.log(pred), reduction_indices=1))
# Hyperparameters
learning_rate = 0.01
# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
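One caveat worth flagging: tf.log(pred) yields -inf once a softmax probability underflows to 0, which can turn the cost into NaN. A common guard is to clip the probabilities (a sketch under that assumption; safe_cost is a name introduced here), or better, use the fused op that Example 22 below examines:

# Sketch: clip probabilities so log(0) can never occur
safe_cost = tf.reduce_mean(
    -tf.reduce_sum(y * tf.log(tf.clip_by_value(pred, 1e-10, 1.0)),
                   reduction_indices=1))

# Preferred: let TensorFlow fuse softmax and cross-entropy on the raw logits
# cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(
#     labels=y, logits=tf.matmul(x, W) + b))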

4. Train the model

training_epochs = 25
batch_size = 100
display_step = 1
saver = tf.train.Saver()
model_path = "H:/tensorflow_projects/chap5/mnist_model.ckpt"

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # run the initialization op
    # Training loop
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Iterate over all batches in the training set
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Accumulate the average loss
            avg_cost += c / total_batch
        # Display progress
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    print(" Finished!")

    # Test the model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    # Save model weights to disk
    save_path = saver.save(sess, model_path)
    print("Model saved in file: %s" % save_path)
Epoch: 0001 cost= 8.528780973
Epoch: 0002 cost= 4.351987058
Epoch: 0003 cost= 3.044533993
Epoch: 0004 cost= 2.405865938
Epoch: 0005 cost= 2.023756936
Epoch: 0006 cost= 1.771609710
Epoch: 0007 cost= 1.594264874
Epoch: 0008 cost= 1.463273387
Epoch: 0009 cost= 1.362599298
Epoch: 0010 cost= 1.283132398
Epoch: 0011 cost= 1.218332462
Epoch: 0012 cost= 1.164574228
Epoch: 0013 cost= 1.118905594
Epoch: 0014 cost= 1.079640089
Epoch: 0015 cost= 1.045503370
Epoch: 0016 cost= 1.015250035
Epoch: 0017 cost= 0.988325027
Epoch: 0018 cost= 0.963962568
Epoch: 0019 cost= 0.942083137
Epoch: 0020 cost= 0.922068430
Epoch: 0021 cost= 0.903581946
Epoch: 0022 cost= 0.886608397
Epoch: 0023 cost= 0.870939313
Epoch: 0024 cost= 0.856314616
Epoch: 0025 cost= 0.842578177
 Finished!
Accuracy: 0.825
Model saved in file: H:/tensorflow_projects/chap5/mnist_model.ckpt

5. Test the model

These lines run inside the training session above:

# Test the model
correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
# Compute accuracy
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))
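The same number can be recomputed outside the graph as a cross-check. A minimal sketch, assuming it runs inside the session above; numpy_accuracy is a helper introduced here for illustration:

import numpy as np

def numpy_accuracy(sess, pred, x, images, labels):
    # Run the forward pass, then compare argmax predictions against argmax labels
    pred_np = sess.run(pred, feed_dict={x: images})
    return np.mean(np.argmax(pred_np, axis=1) == np.argmax(labels, axis=1))

# Usage, inside the session:
# print('NumPy accuracy:', numpy_accuracy(sess, pred, x, mnist.test.images, mnist.test.labels))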

6. Save the model

The Saver must be constructed before the session runs (saver = tf.train.Saver(), as in step 4); then, still inside the session:

# Save model weights to disk
save_path = saver.save(sess, model_path)
print("Model saved in file: %s" % save_path)
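saver.save writes the checkpoint files under model_path. When the exact file name is not known in advance, a restore script can look it up; a sketch, assuming the save directory mirrors model_path above (tf.train.latest_checkpoint is the standard TF 1.x lookup):

# Sketch: find the newest checkpoint in the save directory
ckpt = tf.train.latest_checkpoint("H:/tensorflow_projects/chap5/")
if ckpt is not None:
    print("latest checkpoint:", ckpt)   # can be passed to saver.restore(sess, ckpt)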

7. Restore the model

# Restore the model
print("Starting 2nd session...")
with tf.Session() as sess:
    # Initialize variables
    sess.run(tf.global_variables_initializer())
    # Restore model weights from the previously saved checkpoint
    saver.restore(sess, model_path)

    # Test the model
    correct_prediction = tf.equal(tf.argmax(pred, 1), tf.argmax(y, 1))
    # Compute accuracy
    accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
    print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))

    # Predict two sample images
    output = tf.argmax(pred, 1)
    batch_xs, batch_ys = mnist.train.next_batch(2)
    outputval, predv = sess.run([output, pred], feed_dict={x: batch_xs})
    print(outputval, predv, batch_ys)

    im = batch_xs[0]
    im = im.reshape(-1, 28)
    pylab.imshow(im)
    pylab.show()

    im = batch_xs[1]
    im = im.reshape(-1, 28)
    pylab.imshow(im)
    pylab.show()
Accuracy: 0.825
[0 8] [[9.9999976e-01 4.6237684e-18 2.0244670e-08 4.7625484e-08 7.0704164e-18
  2.7070349e-10 9.5091435e-12 6.9175507e-17 9.4598128e-08 7.1266972e-15]
 [5.7434350e-05 3.0411970e-02 1.3331110e-02 1.6055863e-01 1.1928177e-03
  2.4296941e-02 9.0290455e-04 1.7760798e-05 7.6825178e-01 9.7868522e-04]] [[1. 0. 0. 0. 0. 0. 0. 0. 0. 0.]
 [0. 0. 0. 0. 0. 0. 0. 0. 1. 0.]]


Example 22: Cross-entropy experiment

  • softmax_cross_entropy_with_logits
  • is equivalent to -tf.reduce_sum(labels * tf.log(tf.nn.softmax(logits)), 1)
# -*- coding: utf-8 -*-
import tensorflow as tf

labels = [[0, 0, 1], [0, 1, 0]]
logits = [[2, 0.5, 6],
          [0.1, 0, 3]]
logits_scaled = tf.nn.softmax(logits)
logits_scaled2 = tf.nn.softmax(logits_scaled)
result1 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
result2 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits_scaled)
result3 = -tf.reduce_sum(labels * tf.log(logits_scaled), 1)

with tf.Session() as sess:
    print("scaled=", sess.run(logits_scaled))
    print("scaled2=", sess.run(logits_scaled2))  # applying softmax a second time changes the distribution
    print("rel1=", sess.run(result1), "\n")      # the correct usage: pass the raw logits
    print("rel2=", sess.run(result2), "\n")      # passing already-softmaxed values applies softmax twice, so the loss is wrong
    print("rel3=", sess.run(result3))
scaled= [[0.01791432 0.00399722 0.97808844]
 [0.04980332 0.04506391 0.90513283]]
scaled2= [[0.21747023 0.21446465 0.56806517]
 [0.2300214  0.22893383 0.5410447 ]]
rel1= [0.02215516 3.0996735 ] 

rel2= [0.56551915 1.4743223 ] 

rel3= [0.02215518 3.0996735 ]
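rel1 and rel3 agree because they compute the same quantity; rel2 is wrong because the already-normalized probabilities get softmaxed a second time. The numbers can be reproduced by hand with NumPy (a standalone sketch; logits_np and labels_np are names introduced here):

import numpy as np

logits_np = np.array([[2, 0.5, 6], [0.1, 0, 3]], dtype=np.float64)
labels_np = np.array([[0, 0, 1], [0, 1, 0]], dtype=np.float64)

# Row-wise softmax, shifted by the row max for numerical stability
e = np.exp(logits_np - logits_np.max(axis=1, keepdims=True))
p = e / e.sum(axis=1, keepdims=True)

# Per-sample cross-entropy: -sum(labels * log(p))
print(-np.sum(labels_np * np.log(p), axis=1))  # ~[0.0221 3.0997], matching rel1/rel3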

Example 23: one_hot experiment

# Labels whose entries sum to 1 per row (soft labels rather than strict one-hot)
labels = [[0.4, 0.1, 0.5], [0.3, 0.6, 0.1]]
result4 = tf.nn.softmax_cross_entropy_with_logits(labels=labels, logits=logits)
with tf.Session() as sess:
    print("rel4=", sess.run(result4), "\n")
rel4= [2.1721554 2.7696736] 

Example 24: Using sparse cross-entropy

# Sparse labels: class indices instead of one-hot vectors
labels = [2, 1]  # three classes 0, 1, 2; equivalent to one-hot rows [0,0,1] and [0,1,0]
result5 = tf.nn.sparse_softmax_cross_entropy_with_logits(labels=labels, logits=logits)
with tf.Session() as sess:
    print("rel5=", sess.run(result5), "\n")
rel5= [0.02215516 3.0996735 ] 
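The sparse form is purely a convenience: expanding the indices to one-hot and calling the dense op should give identical per-sample losses. A sketch (labels_dense and result5b are names introduced here):

# Sketch: expand sparse indices to one-hot, then use the dense op
labels_dense = tf.one_hot([2, 1], depth=3)
result5b = tf.nn.softmax_cross_entropy_with_logits(labels=labels_dense, logits=logits)
with tf.Session() as sess:
    print("rel5b=", sess.run(result5b))  # expected to match rel5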

Example 25: Computing the loss value

Here tf.reduce_mean(-tf.reduce_sum(labels * tf.log(logits_scaled), 1)) is equivalent to tf.reduce_mean(result1):

# Note: softmax_cross_entropy_with_logits returns a vector with one loss per
# sample, not a scalar. To get the cross-entropy loss, take the mean of that
# vector with one more tf.reduce_mean.
loss = tf.reduce_mean(result1)
with tf.Session() as sess:
    print("loss=", sess.run(loss))
loss= 1.5609143
labels = [[0, 0, 1], [0, 1, 0]]
loss2 = tf.reduce_mean(-tf.reduce_sum(labels * tf.log(logits_scaled), 1))
with tf.Session() as sess:
    print("loss2=", sess.run(loss2))
loss2= 1.5609144
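loss and loss2 differ only in the last digit, which is float32 rounding; the two formulations are mathematically identical. The listing below retrains the MNIST classifier from Example 21 with the sparse form of the loss: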
# -*- coding: utf-8 -*-
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("MNIST_data/")
print('Training images:', mnist.train.images)
print('Training images shape:', mnist.train.images.shape)

import pylab
im = mnist.train.images[1]
im = im.reshape(-1, 28)
pylab.imshow(im)
pylab.show()

print('Test images shape:', mnist.test.images.shape)
print('Validation images shape:', mnist.validation.images.shape)

import tensorflow as tf  # import the TensorFlow library
tf.reset_default_graph()

# tf Graph input
x = tf.placeholder(tf.float32, [None, 784])  # MNIST images: 28*28 = 784
y = tf.placeholder(tf.int32, [None])         # digits 0-9 => 10 classes, as sparse integer labels

# Set model weights
W = tf.Variable(tf.random_normal([784, 10]))
b = tf.Variable(tf.zeros([10]))
z = tf.matmul(x, W) + b

# Build the model
pred = tf.nn.softmax(z)  # softmax classifier
# Minimize error using cross entropy
#cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=z))

# Hyperparameters
learning_rate = 0.01
# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

training_epochs = 25
batch_size = 100
display_step = 1

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # run the initialization op
    # Training loop
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Iterate over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Accumulate the average loss
            avg_cost += c / total_batch
        # Display progress
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    print(" Finished!")
Training images: [[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]]
Training images shape: (55000, 784)

Test images shape: (10000, 784)
Validation images shape: (5000, 784)
Epoch: 0001 cost= 8.143192529
Epoch: 0002 cost= 4.322669148
Epoch: 0003 cost= 2.981214518
Epoch: 0004 cost= 2.356783852
Epoch: 0005 cost= 1.998906395
Epoch: 0006 cost= 1.765469893
Epoch: 0007 cost= 1.600443805
Epoch: 0008 cost= 1.477601487
Epoch: 0009 cost= 1.381630285
Epoch: 0010 cost= 1.305191407
Epoch: 0011 cost= 1.241832566
Epoch: 0012 cost= 1.188988984
Epoch: 0013 cost= 1.143483993
Epoch: 0014 cost= 1.104311068
Epoch: 0015 cost= 1.069696186
Epoch: 0016 cost= 1.039322816
Epoch: 0017 cost= 1.012039655
Epoch: 0018 cost= 0.987467080
Epoch: 0019 cost= 0.965332884
Epoch: 0020 cost= 0.945004881
Epoch: 0021 cost= 0.926361536
Epoch: 0022 cost= 0.909262278
Epoch: 0023 cost= 0.893383189
Epoch: 0024 cost= 0.878501318
Epoch: 0025 cost= 0.864673607
 Finished!
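The listing above stops after training. With sparse integer labels, an evaluation step would compare the argmax of the predictions directly against y rather than against argmax(y); a sketch of that missing step:

# Sketch: accuracy for sparse (integer) labels
correct_prediction = tf.equal(tf.cast(tf.argmax(pred, 1), tf.int32), y)
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
# Inside the session above:
# print("Accuracy:", accuracy.eval({x: mnist.test.images, y: mnist.test.labels}))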

Example 26: Learning-rate decay

# -*- coding: utf-8 -*-
import tensorflow as tf

global_step = tf.Variable(0, trainable=False)
initial_learning_rate = 0.1  # initial learning rate
learning_rate = tf.train.exponential_decay(initial_learning_rate,
                                           global_step,
                                           decay_steps=10, decay_rate=0.9)
opt = tf.train.GradientDescentOptimizer(learning_rate)
add_global = global_step.assign_add(1)

with tf.Session() as sess:
    tf.global_variables_initializer().run()
    print(sess.run(learning_rate))
    for i in range(20):
        g, rate = sess.run([add_global, learning_rate])
        print(g, rate)
0.1
1 0.1
2 0.09791484
3 0.09688862
4 0.095873155
5 0.094868325
6 0.09387404
7 0.092890166
8 0.09191661
9 0.09095325
10 0.089999996
11 0.08905673
12 0.088123344
13 0.08719975
14 0.08628584
15 0.0853815
16 0.084486626
17 0.08360115
18 0.08272495
19 0.08185792
20 0.08099999
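These values follow the non-staircase decay formula decayed_rate = initial_rate * decay_rate ** (global_step / decay_steps); a one-line check against the step-10 value:

print(0.1 * 0.9 ** (10 / 10))  # 0.09, matching step 10 up to float32 precision

Note that within a single sess.run([add_global, learning_rate]) there is no guaranteed ordering between the increment and the rate read, which is likely why step 1 still prints 0.1.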


Example 27: MNIST classification with a Maxout network

# -*- coding: utf-8 -*-
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets("H:/tensorflow_projects/chap6/MNIST_data/")
print('Training images:', mnist.train.images)
print('Training images shape:', mnist.train.images.shape)

import pylab
im = mnist.train.images[1]
im = im.reshape(-1, 28)
pylab.imshow(im)
pylab.show()

print('Test images shape:', mnist.test.images.shape)
print('Validation images shape:', mnist.validation.images.shape)

import tensorflow as tf  # import the TensorFlow library

def max_out(inputs, num_units, axis=None):
    # Maxout: split the feature axis into num_units groups and take the max of each
    shape = inputs.get_shape().as_list()
    if shape[0] is None:
        shape[0] = -1
    if axis is None:  # assume the channel is the last dimension
        axis = -1
    num_channels = shape[axis]
    if num_channels % num_units:
        raise ValueError('number of features({}) is not '
                         'a multiple of num_units({})'.format(num_channels, num_units))
    shape[axis] = num_units
    shape += [num_channels // num_units]
    outputs = tf.reduce_max(tf.reshape(inputs, shape), -1, keep_dims=False)
    return outputs

tf.reset_default_graph()
# tf Graph input
x = tf.placeholder(tf.float32, [None, 784])  # MNIST images: 28*28 = 784
y = tf.placeholder(tf.int32, [None])         # digits 0-9 => 10 classes

# Set model weights
W = tf.Variable(tf.random_normal([784, 100]))
b = tf.Variable(tf.zeros([100]))
z = tf.matmul(x, W) + b
#maxout = tf.reduce_max(z, axis=1, keep_dims=True)
maxout = max_out(z, 50)

# Second-layer weights
W2 = tf.Variable(tf.truncated_normal([50, 10], stddev=0.1))
b2 = tf.Variable(tf.zeros([10]))
# Build the model
#pred = tf.nn.softmax(tf.matmul(maxout, W2) + b2)
pred = tf.matmul(maxout, W2) + b2
#pred = tf.nn.softmax(z)  # plain softmax classifier
# Minimize error using cross entropy
#cost = tf.reduce_mean(-tf.reduce_sum(y*tf.log(pred), reduction_indices=1))
cost = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(labels=y, logits=pred))

# Hyperparameters
learning_rate = 0.04
# Gradient descent optimizer
optimizer = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)

training_epochs = 200
batch_size = 100
display_step = 1

# Launch the session
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())  # run the initialization op
    # Training loop
    for epoch in range(training_epochs):
        avg_cost = 0.
        total_batch = int(mnist.train.num_examples / batch_size)
        # Iterate over all batches
        for i in range(total_batch):
            batch_xs, batch_ys = mnist.train.next_batch(batch_size)
            # Run optimization op (backprop) and cost op (to get loss value)
            _, c = sess.run([optimizer, cost], feed_dict={x: batch_xs, y: batch_ys})
            # Accumulate the average loss
            avg_cost += c / total_batch
        # Display progress
        if (epoch + 1) % display_step == 0:
            print("Epoch:", '%04d' % (epoch + 1), "cost=", "{:.9f}".format(avg_cost))
    print(" Finished!")
Extracting H:/tensorflow_projects/chap6/MNIST_data/train-images-idx3-ubyte.gz
Extracting H:/tensorflow_projects/chap6/MNIST_data/train-labels-idx1-ubyte.gz
Extracting H:/tensorflow_projects/chap6/MNIST_data/t10k-images-idx3-ubyte.gz
Extracting H:/tensorflow_projects/chap6/MNIST_data/t10k-labels-idx1-ubyte.gz
Training images: [[0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 ...
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]
 [0. 0. 0. ... 0. 0. 0.]]
Training images shape: (55000, 784)

Test images shape: (10000, 784)
Validation images shape: (5000, 784)
Epoch: 0001 cost= 1.669748494
Epoch: 0002 cost= 0.819802765
Epoch: 0003 cost= 0.668256996
Epoch: 0004 cost= 0.599882030
Epoch: 0005 cost= 0.551539327
Epoch: 0006 cost= 0.519114701
Epoch: 0007 cost= 0.501650673
Epoch: 0008 cost= 0.480439953
Epoch: 0009 cost= 0.465431287
Epoch: 0010 cost= 0.454214447
Epoch: 0011 cost= 0.442614048
Epoch: 0012 cost= 0.429748516
Epoch: 0013 cost= 0.419512733
Epoch: 0014 cost= 0.412809217
Epoch: 0015 cost= 0.403128482
Epoch: 0016 cost= 0.395945490
Epoch: 0017 cost= 0.387481769
Epoch: 0018 cost= 0.382592868
Epoch: 0019 cost= 0.376352434
Epoch: 0020 cost= 0.371442565
Epoch: 0021 cost= 0.366640467
Epoch: 0022 cost= 0.360618622
Epoch: 0023 cost= 0.357322852
Epoch: 0024 cost= 0.353282172
Epoch: 0025 cost= 0.348204653
Epoch: 0026 cost= 0.344141857
Epoch: 0027 cost= 0.340688343
Epoch: 0028 cost= 0.336875352
Epoch: 0029 cost= 0.332229141
Epoch: 0030 cost= 0.329368933
Epoch: 0031 cost= 0.324990445
Epoch: 0032 cost= 0.323535117
Epoch: 0033 cost= 0.319696042
Epoch: 0034 cost= 0.316543529
Epoch: 0035 cost= 0.314367712
Epoch: 0036 cost= 0.309627955
Epoch: 0037 cost= 0.308954497
Epoch: 0038 cost= 0.305743327
Epoch: 0039 cost= 0.303948994
Epoch: 0040 cost= 0.300707549
Epoch: 0041 cost= 0.298111228
Epoch: 0042 cost= 0.295571287
Epoch: 0043 cost= 0.293599232
Epoch: 0044 cost= 0.292371846
Epoch: 0045 cost= 0.290433042
Epoch: 0046 cost= 0.286466155
Epoch: 0047 cost= 0.284913121
Epoch: 0048 cost= 0.282463599
Epoch: 0049 cost= 0.282443535
Epoch: 0050 cost= 0.278840295
Epoch: 0051 cost= 0.277910688
Epoch: 0052 cost= 0.275044623
Epoch: 0053 cost= 0.274304534
Epoch: 0054 cost= 0.271387891
Epoch: 0055 cost= 0.270530891
Epoch: 0056 cost= 0.269293524
Epoch: 0057 cost= 0.267875358
Epoch: 0058 cost= 0.265286128
Epoch: 0059 cost= 0.263074537
Epoch: 0060 cost= 0.261540208
Epoch: 0061 cost= 0.261259574
Epoch: 0062 cost= 0.259737343
Epoch: 0063 cost= 0.258162930
Epoch: 0064 cost= 0.256089119
Epoch: 0065 cost= 0.254655639
Epoch: 0066 cost= 0.253505012
Epoch: 0067 cost= 0.252484518
Epoch: 0068 cost= 0.249667299
Epoch: 0069 cost= 0.249462925
Epoch: 0070 cost= 0.249046204
Epoch: 0071 cost= 0.247562397
Epoch: 0072 cost= 0.245829041
Epoch: 0073 cost= 0.244501937
Epoch: 0074 cost= 0.243986385
Epoch: 0075 cost= 0.242621479
Epoch: 0076 cost= 0.241314949
Epoch: 0077 cost= 0.238647706
Epoch: 0078 cost= 0.238957213
Epoch: 0079 cost= 0.237347329
Epoch: 0080 cost= 0.234964659
Epoch: 0081 cost= 0.236123101
Epoch: 0082 cost= 0.233973439
Epoch: 0083 cost= 0.232953551
Epoch: 0084 cost= 0.232046905
Epoch: 0085 cost= 0.229982579
Epoch: 0086 cost= 0.229070544
Epoch: 0087 cost= 0.228393014
Epoch: 0088 cost= 0.227479590
Epoch: 0089 cost= 0.227268234
Epoch: 0090 cost= 0.225049027
Epoch: 0091 cost= 0.224516309
Epoch: 0092 cost= 0.223888728
Epoch: 0093 cost= 0.223191615
Epoch: 0094 cost= 0.221796969
Epoch: 0095 cost= 0.221250222
Epoch: 0096 cost= 0.220323073
Epoch: 0097 cost= 0.218742449
Epoch: 0098 cost= 0.218513060
Epoch: 0099 cost= 0.217564493
Epoch: 0100 cost= 0.215474659
Epoch: 0101 cost= 0.214555269
Epoch: 0102 cost= 0.213661779
Epoch: 0103 cost= 0.214191178
Epoch: 0104 cost= 0.213189474
Epoch: 0105 cost= 0.212041208
Epoch: 0106 cost= 0.211847621
Epoch: 0107 cost= 0.210278228
Epoch: 0108 cost= 0.208721001
Epoch: 0109 cost= 0.209450811
Epoch: 0110 cost= 0.207888889
Epoch: 0111 cost= 0.206186019
Epoch: 0112 cost= 0.205807320
Epoch: 0113 cost= 0.205915253
Epoch: 0114 cost= 0.204875258
Epoch: 0115 cost= 0.204274523
Epoch: 0116 cost= 0.204331738
Epoch: 0117 cost= 0.201808658
Epoch: 0118 cost= 0.201525647
Epoch: 0119 cost= 0.199703673
Epoch: 0120 cost= 0.200700889
Epoch: 0121 cost= 0.199350320
Epoch: 0122 cost= 0.198106946
Epoch: 0123 cost= 0.198094789
Epoch: 0124 cost= 0.196696438
Epoch: 0125 cost= 0.196361274
Epoch: 0126 cost= 0.196492676
Epoch: 0127 cost= 0.194797525
Epoch: 0128 cost= 0.194349858
Epoch: 0129 cost= 0.193110045
Epoch: 0130 cost= 0.192708968
Epoch: 0131 cost= 0.192399970
Epoch: 0132 cost= 0.190516700
Epoch: 0133 cost= 0.190331284
Epoch: 0134 cost= 0.190980941
Epoch: 0135 cost= 0.189532741
Epoch: 0136 cost= 0.188812766
Epoch: 0137 cost= 0.187239818
Epoch: 0138 cost= 0.187442517
Epoch: 0139 cost= 0.186436391
Epoch: 0140 cost= 0.185879297
Epoch: 0141 cost= 0.184914501
Epoch: 0142 cost= 0.185321765
Epoch: 0143 cost= 0.183773249
Epoch: 0144 cost= 0.183931502
Epoch: 0145 cost= 0.183287879
Epoch: 0146 cost= 0.182621817
Epoch: 0147 cost= 0.181577222
Epoch: 0148 cost= 0.180124871
Epoch: 0149 cost= 0.181275859
Epoch: 0150 cost= 0.180238542
Epoch: 0151 cost= 0.178712672
Epoch: 0152 cost= 0.178188846
Epoch: 0153 cost= 0.177580589
Epoch: 0154 cost= 0.177027715
Epoch: 0155 cost= 0.177836312
Epoch: 0156 cost= 0.176792373
Epoch: 0157 cost= 0.175756311
Epoch: 0158 cost= 0.174947099
Epoch: 0159 cost= 0.174266882
Epoch: 0160 cost= 0.174342527
Epoch: 0161 cost= 0.172602550
Epoch: 0162 cost= 0.172811079
Epoch: 0163 cost= 0.172335094
Epoch: 0164 cost= 0.171968882
Epoch: 0165 cost= 0.171027398
Epoch: 0166 cost= 0.169943000
Epoch: 0167 cost= 0.170124644
Epoch: 0168 cost= 0.168496490
Epoch: 0169 cost= 0.169623626
Epoch: 0170 cost= 0.168593532
Epoch: 0171 cost= 0.167650817
Epoch: 0172 cost= 0.167899388
Epoch: 0173 cost= 0.166965650
Epoch: 0174 cost= 0.166645279
Epoch: 0175 cost= 0.166120962
Epoch: 0176 cost= 0.165155771
Epoch: 0177 cost= 0.165017686
Epoch: 0178 cost= 0.163808241
Epoch: 0179 cost= 0.163797412
Epoch: 0180 cost= 0.162719157
Epoch: 0181 cost= 0.163193959
Epoch: 0182 cost= 0.161633140
Epoch: 0183 cost= 0.162454181
Epoch: 0184 cost= 0.161832177
Epoch: 0185 cost= 0.161416251
Epoch: 0186 cost= 0.159936835
Epoch: 0187 cost= 0.160258861
Epoch: 0188 cost= 0.159245104
Epoch: 0189 cost= 0.158908117
Epoch: 0190 cost= 0.157777246
Epoch: 0191 cost= 0.157958048
Epoch: 0192 cost= 0.157402902
Epoch: 0193 cost= 0.157361584
Epoch: 0194 cost= 0.156321988
Epoch: 0195 cost= 0.156084833
Epoch: 0196 cost= 0.155017134
Epoch: 0197 cost= 0.155896032
Epoch: 0198 cost= 0.154472644
Epoch: 0199 cost= 0.154645715
Epoch: 0200 cost= 0.153077820
 Finished!
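To see what max_out does to the shapes, a toy check under the same definition as above (4 samples with 100 features collapse to 50 maxout units by taking the max over pairs):

import numpy as np

toy = tf.constant(np.random.randn(4, 100), dtype=tf.float32)
toy_out = max_out(toy, 50)        # groups of 100/50 = 2 features, max over each group
with tf.Session() as sess:
    print(sess.run(tf.shape(toy_out)))  # [ 4 50]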

 
