
Common Functions in Deep Learning


The following are functions commonly used in deep learning, together with code implementations for each:

1. Identity function

  import numpy as np
  import matplotlib.pyplot as plt

  def identity_function(x):
      return x

  x = np.arange(0.0, 5.0, 0.1)
  y = identity_function(x)
  plt.plot(x, y)
  plt.xlabel('x')
  plt.ylabel('y')
  plt.show()

2. Step function

  import numpy as np
  import matplotlib.pyplot as plt

  def step_function(x):
      # np.int is deprecated; use the built-in int as the dtype
      return np.array(x > 0, dtype=int)

  x = np.arange(-5.0, 5.0, 0.1)
  y = step_function(x)
  plt.plot(x, y)
  plt.xlabel('x')
  plt.ylabel('y')
  plt.show()

3. Sigmoid function

  import numpy as np
  import matplotlib.pyplot as plt

  def sigmoid(x):
      return 1 / (1 + np.exp(-x))

  x = np.arange(-5.0, 5.0, 0.1)
  y = sigmoid(x)
  plt.plot(x, y)
  plt.xlabel('x')
  plt.ylabel('y')
  plt.show()

4. sigmoid_grad function

  import numpy as np
  import matplotlib.pyplot as plt

  def sigmoid(x):
      return 1 / (1 + np.exp(-x))

  def sigmoid_grad(x):
      # derivative of the sigmoid: sigmoid(x) * (1 - sigmoid(x))
      return (1.0 - sigmoid(x)) * sigmoid(x)

  x = np.arange(-5.0, 5.0, 0.1)
  y = sigmoid_grad(x)
  plt.plot(x, y)
  plt.xlabel('x')
  plt.ylabel('y')
  plt.show()
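
A quick sanity check can confirm the analytic derivative. The sketch below (the numerical_grad helper is our own illustration, not part of the original) compares sigmoid_grad against a centered finite-difference estimate, assuming the definitions above are in scope:

  def numerical_grad(f, x, h=1e-4):
      # centered finite difference: (f(x+h) - f(x-h)) / (2h)
      return (f(x + h) - f(x - h)) / (2 * h)

  x = np.array([-2.0, 0.0, 2.0])
  print(sigmoid_grad(x))             # analytic derivative
  print(numerical_grad(sigmoid, x))  # should agree to roughly 1e-8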

5. ReLU function

  import numpy as np
  import matplotlib.pyplot as plt

  def relu(x):
      return np.maximum(0, x)

  x = np.arange(-5.0, 5.0, 0.1)
  y = relu(x)
  plt.plot(x, y)
  plt.xlabel('x')
  plt.ylabel('y')
  plt.show()

6. relu_grad function

  import numpy as np

  def relu_grad(x):
      # gradient is 1 where the input is non-negative, 0 elsewhere
      grad = np.zeros_like(x)
      grad[x >= 0] = 1
      return grad
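
As a quick usage check (assuming the relu_grad definition above is in scope), the gradient is 1 wherever the input is non-negative and 0 elsewhere:

  x = np.array([-2.0, -0.5, 0.0, 3.0])
  print(relu_grad(x))  # [0. 0. 1. 1.]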

7. Softmax function

  import numpy as np

  def softmax(x):
      if x.ndim == 2:
          # batched input: subtract each sample's max for numerical stability
          x = x.T
          x = x - np.max(x, axis=0)
          y = np.exp(x) / np.sum(np.exp(x), axis=0)
          return y.T

      x = x - np.max(x)  # overflow countermeasure
      return np.exp(x) / np.sum(np.exp(x))
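
A brief usage sketch (assuming the softmax definition above): subtracting the per-sample maximum keeps np.exp from overflowing even for large scores, and each output row is a probability distribution:

  x = np.array([[1000.0, 1001.0, 1002.0],
                [0.3, 2.9, 4.0]])
  y = softmax(x)
  print(y)              # no overflow despite the large inputs
  print(y.sum(axis=1))  # each row sums to 1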

8. mean_squared_error function

  import numpy as np

  def mean_squared_error(y, t):
      # half the sum of squared differences between prediction y and target t
      # (note: despite the name, this sums rather than averages the errors)
      return 0.5 * np.sum((y - t)**2)
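
For example (assuming the definition above), with a one-hot target t and a prediction y:

  t = np.array([0, 0, 1, 0])
  y = np.array([0.1, 0.05, 0.7, 0.15])
  print(mean_squared_error(y, t))  # 0.5 * (0.01 + 0.0025 + 0.09 + 0.0225) = 0.0625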

9. cross_entropy_error function

  import numpy as np

  def cross_entropy_error(y, t):
      if y.ndim == 1:
          # promote a single sample to a batch of size 1
          t = t.reshape(1, t.size)
          y = y.reshape(1, y.size)

      # if the target is one-hot, convert it to class-index labels
      if t.size == y.size:
          t = t.argmax(axis=1)

      batch_size = y.shape[0]
      # 1e-7 guards against log(0)
      return -np.sum(np.log(y[np.arange(batch_size), t] + 1e-7)) / batch_size
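
A usage sketch (assuming the definition above): the function accepts either a one-hot target or an integer class label, and both give the same result:

  y = np.array([0.1, 0.05, 0.6, 0.25])
  t_onehot = np.array([0, 0, 1, 0])
  print(cross_entropy_error(y, t_onehot))       # -log(0.6 + 1e-7), about 0.51
  print(cross_entropy_error(y, np.array([2])))  # same value via a label index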

10. softmax_loss function

  import numpy as np

  # relies on the softmax and cross_entropy_error functions defined above
  def softmax_loss(X, t):
      y = softmax(X)
      return cross_entropy_error(y, t)
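
Putting the pieces together (assuming the softmax and cross_entropy_error definitions above are in scope), softmax_loss maps raw scores straight to a scalar loss:

  X = np.array([[0.3, 2.9, 4.0]])  # raw scores (logits) for one sample
  t = np.array([[0, 0, 1]])        # one-hot target
  print(softmax_loss(X, t))        # cross-entropy of the softmax output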
