1. Sigmoid(x) = \frac{1}{1+e^{-x}}
import torch as t
import torch.nn as nn
import torch.nn.functional as F
import matplotlib.pyplot as plt
x = t.linspace(-6, 6, 1000)
f = nn.Sigmoid()
plt.plot(x,f(x))
plt.show()
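A quick sanity check that nn.Sigmoid matches 1/(1+e^{-x}) (my own sketch, reusing x and f from above):
# The module output should match the formula up to float precision.
assert t.allclose(f(x), 1 / (1 + t.exp(-x)), atol=1e-6)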
2. tanh(x) = \frac{e^x-e^{-x}}{e^x+e^{-x}}
f = nn.Tanh()
plt.plot(x, f(x))
plt.show()
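The same kind of check against the formula above (sketch):
# tanh(x) should equal (e^x - e^{-x}) / (e^x + e^{-x}).
assert t.allclose(f(x), (t.exp(x) - t.exp(-x)) / (t.exp(x) + t.exp(-x)), atol=1e-6)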
3. ReLU(x) = max(0,x)
Drawback: the dying-neuron problem, where some neurons may never be activated again.
f = nn.ReLU()
plt.plot(x,f(x))
plt.show()
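To make the dying-neuron drawback concrete, here is a toy sketch (my own example, not from the original): a weight that drives the pre-activation negative for every input gets a zero gradient, so it can never recover.
# Sketch of a "dead" ReLU unit: the pre-activation is always negative,
# so the output and the gradient w.r.t. the weight are both zero.
w = t.tensor([-2.0], requires_grad=True)   # hypothetical weight stuck in a bad region
inp = t.rand(100, 1)                       # inputs are all positive here
out = F.relu(inp * w)                      # inp * w < 0 everywhere
out.sum().backward()
print(out.abs().max().item(), w.grad)      # 0.0 tensor([0.]) -> the weight never updates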
4. LeakyReLU(x) = max(0,x) + negative\_slope * min(0,x)
negative_slope is the slope constant applied to the negative part; the default is 0.01.
f = t.nn.LeakyReLU(negative_slope=0.1)
plt.plot(x,f(x))
plt.show()
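On the negative side the output is just negative_slope times x (sketch, using the f defined above with negative_slope=0.1):
neg = x < 0
assert t.allclose(f(x)[neg], 0.1 * x[neg])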
5. PReLU(x) = max(0,x) + a * min(0,x)
The negative slope a is learnable.
f = t.nn.PReLU(num_parameters= 1, init= 0.25)
plt.plot(x,f(x).detach())
plt.show()
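The slope a is stored in f.weight as an ordinary learnable parameter, so it receives gradients like any other weight (sketch):
print(f.weight)                    # Parameter containing tensor([0.2500], requires_grad=True)
out = f(t.tensor([-2.0, 3.0]))
out.sum().backward()
print(f.weight.grad)               # tensor([-2.]) -> only the negative input depends on a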
6. ELU(x) = max(0,x) + min(0, \alpha(e^{x}-1))
f = t.nn.ELU(alpha=0.5)
plt.plot(x,f(x))
plt.show()
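Checking the module against the formula (sketch, with the alpha=0.5 used above):
ref = t.clamp(x, min=0) + t.clamp(0.5 * (t.exp(x) - 1), max=0)
assert t.allclose(f(x), ref, atol=1e-6)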
7. SELU(x) = scale * (max(0,x) + min(0, \alpha(e^{x}-1)))
f = t.nn.SELU()
plt.plot(x,f(x))
plt.show()
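SELU is ELU with fixed constants alpha ≈ 1.6733 and scale ≈ 1.0507 (values from the PyTorch docs), chosen so that activations tend toward zero mean and unit variance; a quick check (sketch):
alpha, scale = 1.6732632423543772, 1.0507009873554805
assert t.allclose(f(x), scale * F.elu(x, alpha), atol=1e-6)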
8. Tanhshrink(x) = x - Tanh(x)
f = t.nn.Tanhshrink()
plt.plot(x,f(x))
plt.show()
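A one-line check of the definition (sketch):
assert t.allclose(f(x), x - t.tanh(x), atol=1e-6)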
9. Softplus(x) = \frac{1}{\beta}log(1+e^{\beta x})
A smooth variant of ReLU.
f = t.nn.Softplus(beta=1.)
plt.plot(x,f(x))
x2 = t.linspace(0,6,500)
plt.plot(x2,x2,ls='--',c='k')
plt.plot(-x2, t.zeros_like(x2), ls='--', c='k')
plt.show()
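As beta grows, Softplus approaches ReLU, which is why it is described as a smooth variant (sketch; the beta values are arbitrary):
for beta in (1., 5., 10.):
    gap = (t.nn.Softplus(beta=beta)(x) - F.relu(x)).abs().max()
    print(beta, gap.item())        # the gap (at most log(2)/beta) shrinks as beta grows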
10. softsign(x) = \frac{x}{1+|x|}
plt.plot(x,F.softsign(x))
plt.show()
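And the corresponding check for softsign (sketch):
assert t.allclose(F.softsign(x), x / (1 + x.abs()))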