
Python in Practice: Implementing a Neural Network Model Algorithm in Python


Today, 后背小罗 shares with you how to implement a neural network model algorithm in Python, intended for technical study and exchange only.

 


Implementation Tips

 

1. Import the dependency libraries

This step imports the required dependency libraries. The implementation in this article was written against Python 3.7. Note that numpy is also needed by the class in the next step, so it is imported here as well; a quick environment check follows the import block.

    from __future__ import division
    import math
    import random
    import pandas as pd
    import numpy as np  # used later by np.round in backPropagate and test
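If you want to confirm the environment first, a minimal check like the following can be used (a sketch; the article only states it was written against Python 3.7, so any reasonably recent pandas/numpy should work):

    import sys
    import pandas as pd
    import numpy as np

    # Print interpreter and library versions to confirm the environment.
    print(sys.version)
    print('pandas', pd.__version__)
    print('numpy', np.__version__)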

2. Build the BP neural network class:

This step builds a class for a three-layer back-propagation (BP) neural network. The equations it implements are summarized below, and a toy sanity check follows the class definition.
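For reference, this is the standard three-layer BP formulation that the code comments below refer to, with $g$ the sigmoid and superscripts indexing layers:

    $z^{(2)} = \Theta^{(1)} a^{(1)}, \qquad a^{(2)} = g(z^{(2)})$
    $z^{(3)} = \Theta^{(2)} a^{(2)}, \qquad a^{(3)} = g(z^{(3)})$
    $\delta^{(3)} = (y - a^{(3)}) \odot g'(z^{(3)}), \qquad \delta^{(2)} = (\Theta^{(2)})^{\top} \delta^{(3)} \odot g'(z^{(2)})$
    $\Theta^{(l)} \leftarrow \Theta^{(l)} + \eta\, \delta^{(l+1)} (a^{(l)})^{\top}$

Here $g'(z) = g(z)(1 - g(z))$, which is why dsigmoid below is applied to the already-activated value. The one deviation in this article's code is that it rounds the output activation $a^{(3)}$ when forming the output error.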

  1. """ 三层反向传播神经网络 """
  2. class NN:
  3. def __init__(self, ni, nh, no):
  4. self.ni = ni + 1 # 输入层节点
  5. self.nh = nh + 1 # 隐藏层节点
  6. self.no = no # 输出层种类
  7. self.ai = [1.0] * self.ni
  8. self.ah = [1.0] * self.nh
  9. self.ao = [1.0] * self.no
  10. self.wi = self.makeMatrix(self.ni, self.nh) # 输出层到隐藏层的映射矩阵
  11. self.wo = self.makeMatrix(self.nh, self.no) # 隐藏层到输出层的映射矩阵
  12. for i in range(self.ni):
  13. for j in range(self.nh):
  14. self.wi[i][j] = self.rand(-0.2, 0.2)
  15. for j in range(self.nh):
  16. for k in range(self.no):
  17. self.wo[j][k] = self.rand(-2, 2)
  18. #前向传播,激活神经网络的所有节点
  19. def update(self, inputs):
  20. if len(inputs) != self.ni - 1:
  21. print(len(inputs),self.ni - 1)
  22. raise ValueError('与输入层节点数不符!')
  23. for i in range(self.ni - 1):
  24. self.ai[i] = inputs[i]
  25. for j in range(self.nh): # self.nh表示隐藏层的节点数
  26. sum = 0.0 # 激活项a = g(z) z = Θ^T x ;sum相当于z,每次循环归零
  27. for i in range(self.ni): #通过循环z = Θ^T x ,因为Θ、x均为向量
  28. sum = sum + self.ai[i] * self.wi[i][j] #〖 Z〗^((2))=Θ^((1)) a^((1))
  29. self.ah[j] = self.sigmoid(sum) # a^((2))=g(z^((2))),这里使用sigmoid()函数作为激活函数
  30. for k in range(self.no):
  31. sum = 0.0
  32. for j in range(self.nh):
  33. sum = sum + self.ah[j] * self.wo[j][k] #〖 Z〗^((3))=Θ^((2)) a^((2))
  34. self.ao[k] = self.sigmoid(sum) # a^((3))=g(z^((3)))
  35. return self.ao[:]
  36. #反向传播,计算节点激活项的误差
  37. def backPropagate(self, targets, lr): # targets为某样本实际种类分类,lr为梯度下降算法的学习率
  38. output_deltas = [0.0] * self.no
  39. for k in range(self.no):
  40. error = targets[k] - np.round_(self.ao[k])
  41. output_deltas[k] = self.dsigmoid(self.ao[k]) * error
  42. # 计算隐藏层的误差
  43. hidden_deltas = [0.0] * self.nh
  44. for j in range(self.nh):
  45. error = 0.0
  46. for k in range(self.no):
  47. error = error + output_deltas[k] * self.wo[j][k]
  48. hidden_deltas[j] = self.dsigmoid(self.ah[j]) * error
  49. # 更新输出层权重
  50. for j in range(self.nh): # 反向传播算法,求出每个节点的误差后,反向更新权重
  51. for k in range(self.no):
  52. change = output_deltas[k] * self.ah[j]
  53. self.wo[j][k] = self.wo[j][k] + lr * change
  54. # 更新输入层权重
  55. for i in range(self.ni):
  56. for j in range(self.nh):
  57. change = hidden_deltas[j] * self.ai[i]
  58. self.wi[i][j] = self.wi[i][j] + lr * change
  59. # 计算误差
  60. error = 0.0
  61. for k in range(self.no):
  62. error += 0.5 * (targets[k] - np.round_(self.ao[k])) ** 2
  63. return error
  64. #用测试集输出准确率
  65. def test(self, patterns):
  66. count = 0
  67. num=0
  68. for p in patterns:
  69. target = p[1]
  70. result = self.update(p[0])
  71. print(p[0], ':', target, '->', np.round_(result))
  72. num=0
  73. for k in range(self.no):
  74. if (target[k] == np.round_(result[k])):
  75. num +=1
  76. print(num)
  77. if num==3:
  78. count +=1
  79. print("******************",(target) == (np.round_(result)),"******************")
  80. accuracy = int(float(count / len(patterns))*100)
  81. print('accuracy: %-.9f' % accuracy,"%")
  82. #输出训练过后神经网络的权重矩阵
  83. def weights(self):
  84. print('输入层权重:')
  85. for i in range(self.ni):
  86. print(self.wi[i])
  87. print()
  88. print('输出层权重:')
  89. for j in range(self.nh):
  90. print(self.wo[j])
  91. #用训练集训练神经网络
  92. def train(self, patterns, iterations=1000, lr=0.1):
  93. for i in range(iterations):
  94. error = 0.0
  95. for p in patterns:
  96. inputs = p[0]
  97. targets = p[1]
  98. self.update(inputs)
  99. error = error + self.backPropagate(targets, lr)
  100. if i % 100 == 0:
  101. print("percent:",int(i/iterations*100),"%",' error: %-.9f' % error)
  102. #生成区间[a, b)内的随机数
  103. def rand(self, a, b):
  104. return (b - a) * random.random() + a
  105. # 生成大小 I*J 的矩阵,默认零矩阵
  106. def makeMatrix(self, I, J, fill=0.0):
  107. m = []
  108. for i in range(I):
  109. m.append([fill] * J)
  110. return m
  111. # 函数 sigmoid,bp神经网络前向传播的激活函数
  112. def sigmoid(self, x):
  113. return 1.0 / (1.0 + math.exp(-x))
  114. # 函数 sigmoid 的导数,反向传播时使用
  115. def dsigmoid(self, x):
  116. return x * (1 - x)
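As a quick sanity check before moving on to the iris data, the class can be exercised on a toy problem. The following sketch uses XOR; the patterns, layer sizes, and learning rate are illustrative and not from the original article. Because this implementation rounds the output activation in its error term, convergence on XOR is not guaranteed; the point is only to exercise the interfaces:

    # XOR: 2 input features, 4 hidden nodes, 1 output node
    xor_patterns = [
        [[0, 0], [0]],
        [[0, 1], [1]],
        [[1, 0], [1]],
        [[1, 1], [0]],
    ]
    toy = NN(2, 4, 1)
    toy.train(xor_patterns, iterations=1000, lr=0.5)
    for inputs, _ in xor_patterns:
        print(inputs, '->', toy.update(inputs))  # inspect the raw outputs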

3. Read and preprocess the data:

This step reads the data used to build the classification model and preprocesses it: each sample becomes a pair of a four-value feature list and a three-bit class code. An equivalent, more compact form is sketched after the code.

    data = []
    raw = pd.read_csv('iris.csv')
    raw_data = raw.values
    raw_feature = raw_data[1:, 1:5]  # skip the first row; columns 1-4 hold the four features
    for i in range(len(raw_feature)):
        ele = []
        ele.append(list(raw_feature[i]))
        label = raw_data[i + 1][5]  # i + 1 keeps the label aligned with its feature row (the original read raw_data[i], an off-by-one)
        if label == 0:
            ele.append([0, 0, 1])
        elif label == 1:
            ele.append([0, 1, 0])
        elif label == 2:
            ele.append([1, 1, 1])  # note: a standard one-hot code here would be [1, 0, 0]
        else:
            ele.append([0, 0, 0])
        data.append(ele)
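The same preprocessing can be written more compactly with a lookup table. This sketch assumes the same CSV layout as above (an index in column 0, the four features in columns 1-4, a numeric label in column 5):

    # Map each numeric label to its 3-bit class code; unknown labels fall back to [0, 0, 0].
    codes = {0: [0, 0, 1], 1: [0, 1, 0], 2: [1, 1, 1]}
    data = [
        [list(raw_data[i][1:5]), codes.get(raw_data[i][5], [0, 0, 0])]
        for i in range(1, len(raw_data))
    ]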

4. Create the neural network model from the BP class:

This step instantiates the BP neural network class built above to create the model; the resulting weight-matrix shapes are checked in the sketch after the snippet.

    nn = NN(4, 10, 3)  # 4 input features, 10 hidden nodes, 3 output classes
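With these sizes, the constructor allocates a (4+1) x (10+1) input-to-hidden matrix and a (10+1) x 3 hidden-to-output matrix; the extra row in each case is the bias node. A quick check (a sketch using the wi and wo attributes defined above):

    nn = NN(4, 10, 3)
    print(len(nn.wi), 'x', len(nn.wi[0]))  # 5 x 11: input-to-hidden weights (incl. bias)
    print(len(nn.wo), 'x', len(nn.wo[0]))  # 11 x 3: hidden-to-output weights (incl. bias)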

5. Train and test the BP classification model:

This step splits the data into training and test sets, then trains the BP classification model and evaluates its predictions; see the note on shuffling after the code.

    training = data[1:100]   # note: this slice skips data[0]
    test = data[101:]        # and data[100] falls in neither set
    nn.train(training, iterations=1000)
    nn.test(test)
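One caveat about this split: iris files are usually sorted by class, so a contiguous slice can leave whole classes unseen during training. A shuffled split is generally safer; a minimal sketch (random.seed is there only for reproducibility):

    random.seed(0)        # reproducibility only
    random.shuffle(data)  # break the class ordering before splitting
    training = data[:100]
    test = data[100:]
    nn.train(training, iterations=1000)
    nn.test(test)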

Complete source code (for reference only):

    from __future__ import division
    import math
    import random
    import pandas as pd
    import numpy as np

    """ Three-layer back-propagation neural network """
    class NN:
        def __init__(self, ni, nh, no):
            self.ni = ni + 1  # input layer nodes (+1 for the bias node)
            self.nh = nh + 1  # hidden layer nodes (+1 for the bias node)
            self.no = no      # number of output classes
            self.ai = [1.0] * self.ni
            self.ah = [1.0] * self.nh
            self.ao = [1.0] * self.no
            self.wi = self.makeMatrix(self.ni, self.nh)  # input-to-hidden weight matrix
            self.wo = self.makeMatrix(self.nh, self.no)  # hidden-to-output weight matrix
            for i in range(self.ni):
                for j in range(self.nh):
                    self.wi[i][j] = self.rand(-0.2, 0.2)
            for j in range(self.nh):
                for k in range(self.no):
                    self.wo[j][k] = self.rand(-2, 2)

        # Forward propagation: activate every node in the network
        def update(self, inputs):
            if len(inputs) != self.ni - 1:
                print(len(inputs), self.ni - 1)
                raise ValueError('Input length does not match the number of input nodes!')
            for i in range(self.ni - 1):
                self.ai[i] = inputs[i]
            for j in range(self.nh):  # self.nh is the number of hidden nodes
                total = 0.0  # a = g(z) with z = Θ^T x; total plays the role of z, reset each pass
                for i in range(self.ni):  # accumulate z = Θ^T x over the input vector
                    total += self.ai[i] * self.wi[i][j]  # z^(2) = Θ^(1) a^(1)
                self.ah[j] = self.sigmoid(total)  # a^(2) = g(z^(2)); sigmoid() is the activation function
            for k in range(self.no):
                total = 0.0
                for j in range(self.nh):
                    total += self.ah[j] * self.wo[j][k]  # z^(3) = Θ^(2) a^(2)
                self.ao[k] = self.sigmoid(total)  # a^(3) = g(z^(3))
            return self.ao[:]

        # Backward propagation: compute the error term for each node's activation
        def backPropagate(self, targets, lr):  # targets: the sample's true class code; lr: learning rate
            output_deltas = [0.0] * self.no
            for k in range(self.no):
                # note: this implementation rounds the output activation here;
                # standard BP would use the raw value self.ao[k]
                error = targets[k] - np.round(self.ao[k])
                output_deltas[k] = self.dsigmoid(self.ao[k]) * error
            # compute the hidden-layer error terms
            hidden_deltas = [0.0] * self.nh
            for j in range(self.nh):
                error = 0.0
                for k in range(self.no):
                    error += output_deltas[k] * self.wo[j][k]
                hidden_deltas[j] = self.dsigmoid(self.ah[j]) * error
            # update the hidden-to-output weights
            for j in range(self.nh):  # with every node's error term in hand, update the weights backwards
                for k in range(self.no):
                    change = output_deltas[k] * self.ah[j]
                    self.wo[j][k] += lr * change
            # update the input-to-hidden weights
            for i in range(self.ni):
                for j in range(self.nh):
                    change = hidden_deltas[j] * self.ai[i]
                    self.wi[i][j] += lr * change
            # compute the total squared error
            error = 0.0
            for k in range(self.no):
                error += 0.5 * (targets[k] - np.round(self.ao[k])) ** 2
            return error

        # report accuracy on the test set
        def test(self, patterns):
            count = 0
            for p in patterns:
                target = p[1]
                result = self.update(p[0])
                print(p[0], ':', target, '->', np.round(result))
                num = 0
                for k in range(self.no):
                    if target[k] == np.round(result[k]):
                        num += 1
                print(num)
                if num == self.no:  # every output node matches (the original hard-coded 3 here)
                    count += 1
                print("******************", target == np.round(result), "******************")
            accuracy = count / len(patterns) * 100
            print('accuracy: %.2f%%' % accuracy)

        # print the trained network's weight matrices
        def weights(self):
            print('Input-to-hidden weights:')
            for i in range(self.ni):
                print(self.wi[i])
            print()
            print('Hidden-to-output weights:')
            for j in range(self.nh):
                print(self.wo[j])

        # train the network on the training set
        def train(self, patterns, iterations=1000, lr=0.1):
            for i in range(iterations):
                error = 0.0
                for p in patterns:
                    inputs = p[0]
                    targets = p[1]
                    self.update(inputs)
                    error += self.backPropagate(targets, lr)
                if i % 100 == 0:
                    print("percent:", int(i / iterations * 100), "%", ' error: %-.9f' % error)

        # random number in the interval [a, b)
        def rand(self, a, b):
            return (b - a) * random.random() + a

        # build an I x J matrix, zero-filled by default
        def makeMatrix(self, I, J, fill=0.0):
            m = []
            for i in range(I):
                m.append([fill] * J)
            return m

        # sigmoid: the activation function used in the forward pass
        def sigmoid(self, x):
            return 1.0 / (1.0 + math.exp(-x))

        # derivative of the sigmoid, used during back-propagation;
        # it takes the activated value a = g(z), since g'(z) = a * (1 - a)
        def dsigmoid(self, x):
            return x * (1 - x)

    if __name__ == '__main__':
        data = []
        raw = pd.read_csv('iris.csv')
        raw_data = raw.values
        raw_feature = raw_data[1:, 1:5]  # skip the first row; columns 1-4 hold the four features
        for i in range(len(raw_feature)):
            ele = []
            ele.append(list(raw_feature[i]))
            label = raw_data[i + 1][5]  # i + 1 keeps the label aligned with its feature row (the original read raw_data[i], an off-by-one)
            if label == 0:
                ele.append([0, 0, 1])
            elif label == 1:
                ele.append([0, 1, 0])
            elif label == 2:
                ele.append([1, 1, 1])  # note: a standard one-hot code here would be [1, 0, 0]
            else:
                ele.append([0, 0, 0])
            data.append(ele)
        nn = NN(4, 10, 3)
        training = data[1:100]   # note: this slice skips data[0]
        test = data[101:]        # and data[100] falls in neither set
        nn.train(training, iterations=1000)
        nn.test(test)

 


 

 
