当前位置:   article > 正文

Python实现卷积神经网络

from cnnlib.network import cnn

代码见https://github.com/rbtbecontinued/cnn

目前尚未完全写好,支持随机梯度下降和批量梯度下降,激活函数仅支持sigmoid,输出层为softmax,池化核仅支持average,权值初始化采用Xavier方法。今后会逐渐进行补充和完善。

下面提供一个简单的测试用例。

  1. #!/usr/bin/env python3
  2. # -*- coding: utf-8 -*-
  3. """
  4. Created on Mon Jan 22 20:37:37 2018
  5. @author: 行客Thinker
  6. """
  7. import numpy as np
  8. import matplotlib.pyplot as plt
  9. from convolutional_neural_network import cnn
  10. def config_net():
  11. """
  12. 配置网络。
  13. """
  14. # 输入层
  15. size_input = np.array([8, 8])
  16. args_input = ("input", (size_input,))
  17. # C1卷积层
  18. connecting_matrix_C1 = np.ones([1, 2])
  19. size_conv_kernel_C1 = np.array([5, 5])
  20. stride_conv_kernel_C1 = 1
  21. padding_conv_C1 = 0
  22. type_activation_C1 = "sigmoid"
  23. args_C1 = ("convoluting", (connecting_matrix_C1, size_conv_kernel_C1,
  24. stride_conv_kernel_C1, padding_conv_C1,
  25. type_activation_C1))
  26. # S2池化层
  27. type_pooling_S2 = "average"
  28. size_pool_kernel_S2 = np.array([2, 2])
  29. stride_pool_kernel_S2 = 2
  30. padding_pool_S2 = 0
  31. type_activation_S2 = "sigmoid"
  32. args_S2 = ("pooling", (type_pooling_S2, size_pool_kernel_S2,
  33. stride_pool_kernel_S2, padding_pool_S2,
  34. type_activation_S2))
  35. # C3卷积层
  36. connecting_matrix_C3 = np.ones([2, 2])
  37. size_conv_kernel_C3 = np.array([2, 2])
  38. stride_conv_kernel_C3 = 1
  39. padding_conv_C3 = 0
  40. type_activation_C3 = "sigmoid"
  41. args_C3 = ("convoluting", (connecting_matrix_C3, size_conv_kernel_C3,
  42. stride_conv_kernel_C3, padding_conv_C3,
  43. type_activation_C3))
  44. # 输出层
  45. n_nodes_output = 2
  46. type_output = "softmax"
  47. args_output = ("output", (n_nodes_output, type_output))
  48. args = (args_input,
  49. args_C1,
  50. args_S2,
  51. args_C3,
  52. args_output)
  53. cnn_net = cnn()
  54. cnn_net.config(args)
  55. return cnn_net
  56. n_train = 10000
  57. X_train = 0.2 * np.random.randn(8, 8, n_train)
  58. Y_train = np.random.randint(2, size=n_train)
  59. for i in range(Y_train.shape[0]):
  60. if Y_train[i] == 0:
  61. X_train[1, :, i] += np.ones(8)
  62. elif Y_train[i] == 1:
  63. X_train[:, 1, i] += np.ones(8)
  64. size_batch = 50
  65. n_epochs = 500
  66. cnn_net = config_net()
  67. cnn_net.fit(X_train, Y_train, size_batch=size_batch, n_epochs=n_epochs)
  68. n_test = 1000
  69. X_test = 0.2 * np.random.randn(8, 8, n_test)
  70. Y_test = np.random.randint(2, size=n_test)
  71. for i in range(Y_test.shape[0]):
  72. if Y_test[i] == 0:
  73. X_test[1, :, i] += np.ones(8)
  74. elif Y_test[i] == 1:
  75. X_test[:, 1, i] += np.ones(8)
  76. correct_rate = cnn_net.test(X_test, Y_test)
  77. plt.figure()
  78. for i in range(cnn_net.layers[1].n_nodes):
  79. plt.subplot(1, 2, i + 1)
  80. plt.imshow(cnn_net.layers[1].nodes[i].conv_kernels[0], cmap="gray")
  81. plt.show()
运行结果如下图所示:


横轴为训练样本的批数,纵轴为网络对每批训练样本的损失。

声明:本文内容由网友自发贡献,不代表【wpsshop博客】立场,版权归原作者所有,本站不承担相应法律责任。如您发现有侵权的内容,请联系我们。转载请注明出处:https://www.wpsshop.cn/w/花生_TL007/article/detail/261696
推荐阅读
相关标签
  

闽ICP备14008679号