Method 1: hand-derive each neuron node's forward and backward computation (parameter updates)

import numpy as np

_w = [0.1, 0.15, 0.2, 0.25, 0.3, 0.35, 0.4, 0.45, 0.5, 0.55, 0.6, 0.65]
_b = [0.35, 0.65]
_x = [5, 10]
_y = [0.01, 0.09]
lr = 0.5


def w(index):
    return _w[index - 1]


def x(index):
    return _x[index - 1]


def b(index):
    return _b[index - 1]


def y(index):
    return _y[index - 1]


def set_w(index, gd):
    # gradient descent step for weight `index`
    _w[index - 1] = _w[index - 1] - lr * gd


def sigmoid(z):
    return 1.0 / (1 + np.exp(-z))


def training():
    # forward pass: compute every node explicitly
    neth1 = w(1) * x(1) + w(2) * x(2) + b(1)
    outh1 = sigmoid(neth1)
    neth2 = w(3) * x(1) + w(4) * x(2) + b(1)
    outh2 = sigmoid(neth2)
    neth3 = w(5) * x(1) + w(6) * x(2) + b(1)
    outh3 = sigmoid(neth3)
    neto1 = w(7) * outh1 + w(9) * outh2 + w(11) * outh3 + b(2)
    outo1 = sigmoid(neto1)
    neto2 = w(8) * outh1 + w(10) * outh2 + w(12) * outh3 + b(2)
    outo2 = sigmoid(neto2)
    loss = 0.5 * (y(1) - outo1) ** 2 + 0.5 * (y(2) - outo2) ** 2

    # output-layer deltas: dLoss/dneto1 and dLoss/dneto2
    t1 = (outo1 - y(1)) * outo1 * (1 - outo1)
    t2 = (outo2 - y(2)) * outo2 * (1 - outo2)

    # backward pass: update the parameters
    # gradient for w1 ---> dLoss/dneth1 * dneth1/dw1
    #   dLoss/dneth1 ---> dLoss/douth1 * douth1/dneth1
    #   dLoss/douth1 ---> dLoss/douto1 * douto1/dneto1 * dneto1/douth1
    #                     + dLoss/douto2 * douto2/dneto2 * dneto2/douth1
    #   dLoss/douto1 and dLoss/douto2 ---> plain derivatives of the loss
    set_w(1, ((t1 * w(7) + t2 * w(8)) * outh1 * (1 - outh1)) * x(1))
    # gradient for w2 ---> same dLoss/dneth1 as above, times dneth1/dw2 = x2
    set_w(2, ((t1 * w(7) + t2 * w(8)) * outh1 * (1 - outh1)) * x(2))
    # gradient for w3 ---> dLoss/dneth2 * dneth2/dw3
    #   dLoss/douth2 ---> dLoss/douto1 * douto1/dneto1 * dneto1/douth2
    #                     + dLoss/douto2 * douto2/dneto2 * dneto2/douth2
    set_w(3, (t1 * w(9) + t2 * w(10)) * outh2 * (1 - outh2) * x(1))
    # gradient for w4 ---> same dLoss/dneth2, times dneth2/dw4 = x2
    set_w(4, (t1 * w(9) + t2 * w(10)) * outh2 * (1 - outh2) * x(2))
    # w5 and w6 follow the same pattern through outh3
    set_w(5, (t1 * w(11) + t2 * w(12)) * outh3 * (1 - outh3) * x(1))
    set_w(6, (t1 * w(11) + t2 * w(12)) * outh3 * (1 - outh3) * x(2))
    # output-layer weights: gradient = output delta * upstream activation
    set_w(7, t1 * outh1)
    set_w(8, t2 * outh1)
    set_w(9, t1 * outh2)
    set_w(10, t2 * outh2)
    set_w(11, t1 * outh3)
    set_w(12, t2 * outh3)
    return loss


if __name__ == '__main__':
    print(_w)
    print('First:', training())
    for _ in range(1000):
        r = training()
    print('training 1000:', r)
    print(_w)
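For reference, the chain-rule bookkeeping that the comments in training() spell out can be written compactly; here δ₁ and δ₂ are the cached terms t1 and t2 in the code:

\[
L = \tfrac{1}{2}(y_1 - out_{o1})^2 + \tfrac{1}{2}(y_2 - out_{o2})^2,
\qquad
\delta_k = \frac{\partial L}{\partial net_{ok}} = (out_{ok} - y_k)\,out_{ok}(1 - out_{ok})
\]
\[
\frac{\partial L}{\partial w_7} = \delta_1\,out_{h1},
\qquad
\frac{\partial L}{\partial w_1} = (\delta_1 w_7 + \delta_2 w_8)\,out_{h1}(1 - out_{h1})\,x_1
\]

The second pair of formulas corresponds exactly to set_w(7, t1 * outh1) and set_w(1, ...); every other set_w call is the same pattern with the indices shifted.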
Method 2: use the numpy scientific-computing library to write the forward and backward computation (parameter updates)
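The article's own code for Method 2 is not reproduced above, so the following is only a minimal sketch of what a vectorized version might look like. It assumes the same network as Method 1: the W1/W2 weight layout, the shared per-layer biases, and the choice to leave the biases un-updated are all carried over from the hand-written code, not taken from the original article.

import numpy as np

x = np.array([5.0, 10.0])          # input vector
y = np.array([0.01, 0.09])         # target vector
W1 = np.array([[0.1, 0.15],        # hidden layer: rows [w1,w2], [w3,w4], [w5,w6]
               [0.2, 0.25],
               [0.3, 0.35]])
W2 = np.array([[0.4, 0.5, 0.6],    # output layer: rows [w7,w9,w11], [w8,w10,w12]
               [0.45, 0.55, 0.65]])
b = np.array([0.35, 0.65])         # b[0] for the hidden layer, b[1] for the output layer
lr = 0.5


def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))


def training():
    global W1, W2
    # forward pass, one matrix-vector product per layer
    out_h = sigmoid(W1 @ x + b[0])
    out_o = sigmoid(W2 @ out_h + b[1])
    loss = 0.5 * np.sum((y - out_o) ** 2)
    # backward pass: output deltas (t1, t2 in Method 1), then hidden deltas
    delta_o = (out_o - y) * out_o * (1 - out_o)
    delta_h = (W2.T @ delta_o) * out_h * (1 - out_h)
    # gradient descent step (biases stay fixed, as in Method 1)
    W2 -= lr * np.outer(delta_o, out_h)
    W1 -= lr * np.outer(delta_h, x)
    return loss


if __name__ == '__main__':
    print('First:', training())
    for _ in range(1000):
        r = training()
    print('training 1000:', r)

Note that delta_h is computed before W2 is modified, so the hidden-layer gradients use the pre-update output weights; this matches the order of the set_w calls in Method 1, where w7 through w12 are only overwritten after w1 through w6 have been updated.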