
Reinforcement Learning in Practice: REINFORCE, Actor-Critic, TRPO, PPO

1. REINFORCE

We experiment with the REINFORCE algorithm in the CartPole environment:

import gym
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import rl_utils

First we define the policy network PolicyNet. Its input is a state and its output is the action probability distribution for that state; a softmax over the discrete action space implements a learnable categorical distribution.

class PolicyNet(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim, action_dim):
        super(PolicyNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return F.softmax(self.fc2(x), dim=1)

Next we define the REINFORCE agent. In the take_action() function we sample a discrete action from the action probability distribution. In update(), following the algorithm, the loss is the negative log-probability of each taken action weighted by its return, -\sum_{t}G_t\log\pi_{\theta}(a_t|s_t); differentiating this loss and taking a gradient-descent step is equivalent to gradient ascent on the policy objective.

class REINFORCE:
    def __init__(self, state_dim, hidden_dim, action_dim, learning_rate, gamma, device):
        self.policy_net = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        self.optimizer = torch.optim.Adam(self.policy_net.parameters(),
                                          lr=learning_rate)  # Adam optimizer
        self.gamma = gamma  # discount factor
        self.device = device

    def take_action(self, state):  # sample an action from the action probability distribution
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        probs = self.policy_net(state)
        action_dist = torch.distributions.Categorical(probs)
        action = action_dist.sample()
        return action.item()

    def update(self, transition_dict):
        reward_list = transition_dict['rewards']
        state_list = transition_dict['states']
        action_list = transition_dict['actions']
        G = 0
        self.optimizer.zero_grad()
        for i in reversed(range(len(reward_list))):  # start from the last step
            reward = reward_list[i]
            state = torch.tensor([state_list[i]], dtype=torch.float).to(self.device)
            action = torch.tensor([action_list[i]]).view(-1, 1).to(self.device)
            log_prob = torch.log(self.policy_net(state).gather(1, action))
            G = self.gamma * G + reward
            loss = -log_prob * G  # loss for this step
            loss.backward()  # backpropagate to accumulate gradients
        self.optimizer.step()  # gradient descent step
learning_rate = 1e-3
num_episodes = 1000
hidden_dim = 128
gamma = 0.98
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

env_name = "CartPole-v0"
env = gym.make(env_name)
env.seed(0)
torch.manual_seed(0)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = REINFORCE(state_dim, hidden_dim, action_dim, learning_rate, gamma, device)

return_list = []
for i in range(10):
    with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar:
        for i_episode in range(int(num_episodes / 10)):
            episode_return = 0
            transition_dict = {
                'states': [],
                'actions': [],
                'next_states': [],
                'rewards': [],
                'dones': []
            }
            state = env.reset()
            env.render()
            done = False
            while not done:
                action = agent.take_action(state)
                next_state, reward, done, _ = env.step(action)
                transition_dict['states'].append(state)
                transition_dict['actions'].append(action)
                transition_dict['next_states'].append(next_state)
                transition_dict['rewards'].append(reward)
                transition_dict['dones'].append(done)
                state = next_state
                episode_return += reward
            return_list.append(episode_return)
            agent.update(transition_dict)
            if (i_episode + 1) % 10 == 0:
                pbar.set_postfix({
                    'episode': '%d' % (num_episodes / 10 * i + i_episode + 1),
                    'return': '%.3f' % np.mean(return_list[-10:])
                })
            pbar.update(1)

In CartPole-v0 the maximum return per episode is 200, and we find that REINFORCE works well and can reach 200. Next we plot how the return of each trajectory changes during training. Since the raw returns fluctuate heavily, they are usually smoothed.

episodes_list = list(range(len(return_list)))
plt.plot(episodes_list, return_list)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('REINFORCE on {}'.format(env_name))
plt.show()

mv_return = rl_utils.moving_average(return_list, 9)
plt.plot(episodes_list, mv_return)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('REINFORCE on {}'.format(env_name))
plt.show()
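The helper rl_utils.moving_average used above is not listed in this post. A minimal sketch that matches how it is called here, i.e. a centred sliding-window mean whose output has the same length as the input, could look like the following (the actual rl_utils helper may be implemented differently):

import numpy as np

def moving_average(a, window_size):
    # Smooth a 1-D sequence with a centred window mean; the window shrinks near
    # both ends so that the output has the same length as the input.
    a = np.asarray(a, dtype=float)
    half = window_size // 2
    smoothed = np.empty_like(a)
    for i in range(len(a)):
        lo = max(0, i - half)
        hi = min(len(a), i + half + 1)
        smoothed[i] = a[lo:hi].mean()
    return smoothed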

As more and more trajectories are collected, REINFORCE effectively learns the optimal policy. However, compared with the earlier DQN algorithm, REINFORCE needs more episodes, because it is an on-policy algorithm: previously collected trajectory data is never reused. Its performance also fluctuates noticeably, mainly because the return of each sampled trajectory has high variance; this is the main weakness of REINFORCE.

2. Actor-Critic

We again experiment with the Actor-Critic algorithm in the CartPole environment.

import gym
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import rl_utils

Define the policy network PolicyNet, identical to the one used for REINFORCE.

class PolicyNet(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim, action_dim):
        super(PolicyNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return F.softmax(self.fc2(x), dim=1)

Actor-Critic additionally introduces a value network. The following code defines ValueNet, whose input is a state and whose output is the value of that state.

class ValueNet(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim):
        super(ValueNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return self.fc2(x)

Next we define the ActorCritic agent, which mainly contains two functions: taking actions and updating the network parameters.

class ActorCritic:
    def __init__(self, state_dim, hidden_dim, action_dim, actor_lr, critic_lr, gamma, device):
        self.actor = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        self.critic = ValueNet(state_dim, hidden_dim).to(device)  # value network
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)  # value network optimizer
        self.gamma = gamma
        self.device = device

    def take_action(self, state):
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        probs = self.actor(state)
        action_dist = torch.distributions.Categorical(probs)
        action = action_dist.sample()
        return action.item()

    def update(self, transition_dict):
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        actions = torch.tensor(transition_dict['actions']).view(-1, 1).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)
        td_target = rewards + self.gamma * self.critic(next_states) * (1 - dones)  # TD target
        td_delta = td_target - self.critic(states)  # TD error
        log_probs = torch.log(self.actor(states).gather(1, actions))
        actor_loss = torch.mean(-log_probs * td_delta.detach())
        critic_loss = torch.mean(F.mse_loss(self.critic(states), td_target.detach()))  # mean squared error loss
        self.actor_optimizer.zero_grad()
        self.critic_optimizer.zero_grad()
        actor_loss.backward()  # compute gradients of the policy network
        critic_loss.backward()  # compute gradients of the value network
        self.actor_optimizer.step()  # update the policy network parameters
        self.critic_optimizer.step()  # update the value network parameters
actor_lr = 1e-3
critic_lr = 1e-2
num_episodes = 1000
hidden_dim = 128
gamma = 0.98
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

env_name = 'CartPole-v0'
env = gym.make(env_name)
env.seed(0)
torch.manual_seed(0)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = ActorCritic(state_dim, hidden_dim, action_dim, actor_lr, critic_lr, gamma, device)
return_list = rl_utils.train_on_policy_agent(env, agent, num_episodes)
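The helper rl_utils.train_on_policy_agent is also an external utility that is not shown in this post. Assuming it mirrors the hand-written REINFORCE training loop above (collect one episode with the current policy, then call agent.update() on that fresh batch), a minimal sketch could be:

from tqdm import tqdm

def train_on_policy_agent(env, agent, num_episodes):
    # Generic on-policy training loop: roll out one episode, store the
    # transitions, then let the agent update on that freshly collected batch.
    return_list = []
    for i in range(10):
        with tqdm(total=int(num_episodes / 10), desc='Iteration %d' % i) as pbar:
            for i_episode in range(int(num_episodes / 10)):
                transition_dict = {'states': [], 'actions': [], 'next_states': [],
                                   'rewards': [], 'dones': []}
                state = env.reset()
                done = False
                episode_return = 0
                while not done:
                    action = agent.take_action(state)
                    next_state, reward, done, _ = env.step(action)
                    transition_dict['states'].append(state)
                    transition_dict['actions'].append(action)
                    transition_dict['next_states'].append(next_state)
                    transition_dict['rewards'].append(reward)
                    transition_dict['dones'].append(done)
                    state = next_state
                    episode_return += reward
                return_list.append(episode_return)
                agent.update(transition_dict)
                pbar.update(1)
    return return_list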
episodes_list = list(range(len(return_list)))
plt.plot(episodes_list, return_list)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('Actor-Critic on {}'.format(env_name))
plt.show()

mv_return = rl_utils.moving_average(return_list, 9)
plt.plot(episodes_list, mv_return)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('Actor-Critic on {}'.format(env_name))
plt.show()

The experiments show that Actor-Critic converges to the optimal policy quickly and that training is very stable; the fluctuations are clearly smaller than with REINFORCE, thanks to the value function reducing the variance.

3. TRPO

3.1 Algorithm Overview

Policy-based methods include policy gradient and Actor-Critic. Although these algorithms are simple and intuitive, they can suffer from unstable training in practice. Policy-based methods parameterize the agent's policy, design an objective function that measures how good the policy is, and maximize this objective by gradient ascent so that the policy becomes optimal. Concretely, let \theta denote the parameters of the policy \pi_{\theta} and define J(\theta)=E_{s_0}\left [ V^{\pi_{\theta}}(s_0) \right ]=E_{\pi_{\theta}}\left [ \sum_{t=0}^{\infty }\gamma^tr(s_t,a_t)\right ]. The goal of policy-based methods such as REINFORCE is to find \theta^{*}=arg\,\underset{\theta}{max}\,J(\theta); the policy gradient algorithm iteratively updates the policy parameters \theta along \triangledown _{\theta}J(\theta), i.e. \theta_{new}=\theta_{old}+\alpha \triangledown _{\theta}J. This approach has an obvious drawback: when the policy is a deep network, a single gradient step with too large a step size may make the policy suddenly and significantly worse, harming the rest of training.

To address this problem, we look for a trust region at each update: as long as the policy is updated within this region, we have some guarantee of safety for the policy's performance. This is the main idea of Trust Region Policy Optimization (TRPO). TRPO theoretically guarantees monotonic improvement during policy learning and achieves better results than vanilla policy gradient in practice.

3.2 Policy Objective

Suppose the current policy is \pi_{\theta} with parameters \theta. We want to use the current \theta to find better parameters \theta^{'} such that J(\theta^{'})\geqslant J(\theta). Since the distribution of the initial state s_0 does not depend on the policy, the objective J(\theta) under \pi_{\theta} can be written as an expectation under the new policy \pi_{\theta^{'}}:

J(\theta)=E_{s_0}\left [ V^{\pi_{\theta}}(s_0) \right ]

=E_{\pi_{\theta^{'}}}\left [ \sum_{t=0}^{\infty }\gamma ^tV^{\pi_{\theta}}(s_t)-\sum_{t=1}^{\infty } \gamma^tV^{\pi_{\theta}}(s_t)\right ]

=-E_{\pi_{\theta^{'}}}\left [ \sum_{t=0}^{\infty }\gamma ^t\left ( \gamma V^{\pi_{\theta}}(s_{t+1})-V^{\pi_{\theta}}(s_t) \right )\right ]

Based on this identity, we can derive the gap between the objectives of the new and old policies:

J(\theta^{'})-J(\theta)=E_{s_0}\left [ V^{\pi_{\theta^{'}}}(s_0)- V^{\pi_{\theta}}(s_0)\right ]

=E_{\pi_{\theta^{'}}}\left [ \sum_{t=0}^{\infty } \gamma^tr(s_t,a_t)\right ]+E_{\pi_{\theta^{'}}}\left [ \sum_{t=0}^{\infty }\gamma^t\left ( \gamma V^{\pi_{\theta}}(s_{t+1})-V^{\pi_{\theta}} (s_t)\right ) \right ]

=E_{\pi_{\theta^{'}}}\left [ \sum_{t=0}^{\infty } \gamma^t\left [ r(s_t,a_t)+ \gamma V^{\pi_{\theta}}(s_{t+1})-V^{\pi_{\theta}} (s_t) \right ]\right ]

Defining the temporal-difference residual r(s_t,a_t)+\gamma V^{\pi_{\theta}}(s_{t+1})-V^{\pi_{\theta}}(s_t) as the advantage function A, this becomes:

=E_{\pi_{\theta^{'}}}\left [ \sum_{t=0}^{\infty }\gamma^tA^{\pi_{\theta}}(s_t,a_t) \right ]

=\sum_{t=0}^{\infty }\gamma^tE_{s_t\sim P_t^{\pi_{\theta^{'}}}}E_{a_t\sim\pi_{\theta^{'}}(\cdot|s_t)}\left [ A^{\pi_{\theta}}(s_t,a_t) \right ]

=\frac{1}{1-\gamma}E_{s\sim v^{\pi_{\theta^{'}}}}E_{a\sim \pi_{\theta^{'}}(\cdot|s)}\left [ A^{\pi_{\theta}}(s,a) \right ]

The last equality uses the definition of the state visitation distribution: v^{\pi}(s)=(1-\gamma)\sum_{t=0}^{\infty }\gamma^{t}P_t^{\pi}(s). Therefore, as long as we find a new policy such that E_{s\sim v^{\pi_{\theta^{'}}}}E_{a\sim \pi_{\theta^{'}}(\cdot|s)}\left [ A^{\pi_{\theta}}(s,a) \right ]\geqslant 0, the policy performance is guaranteed to be monotonically non-decreasing, i.e. J(\theta^{'})\geqslant J(\theta).

However, this is very hard to solve directly, because \pi_{\theta^{'}} is exactly the policy we are trying to find, yet we would need it to collect samples. Enumerating all candidate new policies, collecting data with each of them, and then checking which one satisfies the condition is clearly infeasible. TRPO therefore makes an approximation to the state visitation distribution: it ignores the change in state visitation between the two policies and simply uses the state distribution of the old policy \pi_{\theta}, which defines the following surrogate objective:

L_{\theta}(\theta^{'})=J(\theta)+\frac{1}{1-\gamma}E_{s\sim v^{\pi_{\theta}}}E_{a\sim \pi_{\theta^{'}}(\cdot|s)}\left [ A^{\pi_{\theta}} (s,a)\right ]

When the new and old policies are very close, the state visitation distributions barely change, so this approximation is reasonable. The actions, however, are still sampled from the new policy \pi_{\theta^{'}}, which we handle with importance sampling over the action distribution:

L_{\theta}(\theta^{'})=J(\theta)+E_{s\sim v^{\pi_{\theta}}}E_{a\sim \pi_{\theta}(\cdot|s)}\left [ \frac{\pi_{\theta^{'}}(a|s)}{\pi_{\theta}(a|s)}A^{\pi_{\theta}} (s,a)\right ]

In this way, we can estimate and optimize the new policy \pi_{\theta^{'}} using data already sampled from the old policy \pi_{\theta}. To ensure the new and old policies stay close enough, TRPO uses the Kullback-Leibler (KL) divergence to measure the distance between policies, giving the overall optimization problem:

\underset{\theta^{'}}{max}\,L_{\theta}(\theta^{'}) 

s.t.\ \ E_{s\sim v^{\pi_{\theta_k}}}\left [ D_{KL}\left ( \pi_{\theta_k}(\cdot|s),\pi_{\theta^{'}}(\cdot|s) \right ) \right ]\leqslant \delta

The inequality constraint defines a Kullback-Leibler ball in policy space, called the trust region. Within this region, the state distribution produced by the currently learned policy interacting with the environment can be treated as identical to the state distribution sampled by the previous round's policy, so one-step importance sampling over actions allows the learned policy to improve stably.

3.3 Approximate Solution

Solving this constrained optimization problem directly is cumbersome, so the TRPO implementation solves it quickly with one more approximation. For convenience, in the following formulas we write \theta_k instead of \theta, denoting the policy after the k-th iteration. We Taylor-expand the objective and the constraint around \theta_k, using a first-order approximation for the objective and a second-order approximation for the constraint:

E_{s\sim v^{\pi_{\theta_k}}}E_{a \sim \pi_ {\theta_k}(\cdot|s)}\left [ \frac{\pi_{\theta^{'}}(a|s)}{\pi_{\theta_k}(a|s)}A^{\pi_{\theta_k}}(s,a) \right ]\approx g^T(\theta^{'}-\theta_k)

E_{s\sim v^{\pi_{\theta_k}}}\left [ D_{KL}\left ( \pi_{\theta_k}(\cdot|s),\pi_{\theta^{'}}(\cdot|s) \right )\right ]\approx \frac{1}{2}(\theta^{'}-\theta_k)^TH(\theta^{'}-\theta_k)

where g=\triangledown_{\theta^{'}}E_{s\sim v^{\pi_{\theta_k}}}E_{a\sim \pi_{\theta_k}(\cdot|s)}\left [ \frac{\pi_{\theta^{'}}(a|s)}{\pi_{\theta_{k}}(a|s)}A^{\pi_{\theta_k}}(s,a) \right ] is the gradient of the objective, and

H=H\left [ E_{s\sim v^{\pi_{\theta_{k}}}}\left [ D_{KL}\left ( \pi_{\theta_k}(\cdot|s) ,\pi_{\theta^{'}}(\cdot|s) \right )\right ] \right ] is the Hessian matrix of the average KL divergence between the two policies.

The optimization problem then becomes:

\theta_{k+1}=arg\,\underset{\theta^{'}}{max}\,g^T(\theta^{'}-\theta_k)\,\,\,s.t.\,\,\,\frac{1}{2}(\theta^{'}-\theta_k)^TH(\theta^{'}-\theta_k)\leqslant \delta

Using the KKT conditions, the solution of this problem can be derived directly:

\theta_{k+1}=\theta_k+\sqrt{\frac{2\delta }{g^TH^{-1}g}}H^{-1}g
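For completeness, a brief sketch of that derivation (assuming H is positive definite, so the constraint is active at the optimum): form the Lagrangian

\mathcal{L}(\theta^{'},\lambda)=g^T(\theta^{'}-\theta_k)-\lambda\left ( \frac{1}{2}(\theta^{'}-\theta_k)^TH(\theta^{'}-\theta_k)-\delta \right )

Stationarity gives g=\lambda H(\theta^{'}-\theta_k), i.e. \theta^{'}-\theta_k=\frac{1}{\lambda}H^{-1}g; substituting this into the active constraint \frac{1}{2}(\theta^{'}-\theta_k)^TH(\theta^{'}-\theta_k)=\delta yields \frac{1}{\lambda}=\sqrt{\frac{2\delta}{g^TH^{-1}g}}, which recovers the update above.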

3.4 Conjugate Gradient

In general, a neural-network policy has tens of thousands of parameters or more, so computing and storing the inverse of the Hessian matrix H would consume a large amount of memory and time. TRPO avoids this with the conjugate gradient method, whose core idea is to compute x=H^{-1}g directly, i.e. the parameter update direction. Let \beta be the maximum step size that still satisfies the KL constraint. Then, from the KL constraint, \frac{1}{2}(\beta x)^T H(\beta x)=\delta. Solving for \beta gives \beta =\sqrt{\frac{2\delta }{x^THx}}, so the parameter update becomes:

\theta_{k+1}=\theta_k+\sqrt{\frac{2\delta }{x^THx}}x

Therefore, as long as we can compute x=H^{-1}g, we can update the parameters with this formula, and the problem reduces to solving Hx=g. Since H is a symmetric positive definite matrix, the conjugate gradient method applies; its procedure is sketched below.
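A minimal sketch of the standard conjugate gradient iteration for solving Hx=g, written to mirror the conjugate_gradient method implemented later; hvp(p) stands for any function returning the Hessian-vector product Hp (the function name and signature here are illustrative, not from rl_utils):

import torch

def conjugate_gradient_solve(hvp, g, max_iter=10, tol=1e-10):
    # Solve Hx = g using only Hessian-vector products hvp(p) = Hp.
    x = torch.zeros_like(g)      # initial guess x_0 = 0
    r = g.clone()                # residual r_0 = g - H x_0 = g
    p = g.clone()                # first search direction
    rdotr = torch.dot(r, r)
    for _ in range(max_iter):
        Hp = hvp(p)
        alpha = rdotr / torch.dot(p, Hp)   # step size along direction p
        x += alpha * p
        r -= alpha * Hp
        new_rdotr = torch.dot(r, r)
        if new_rdotr < tol:                # residual small enough: stop early
            break
        p = r + (new_rdotr / rdotr) * p    # next H-conjugate direction
        rdotr = new_rdotr
    return x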

During the conjugate gradient iterations, computing the step size \alpha_k and the residual r_{k+1} directly would require computing and storing the Hessian matrix H. To avoid materializing such a large matrix, we only compute the Hessian-vector product Hx and never form or store H itself. This is easy to do, because for any column vector v it is straightforward to verify that:

Hv=\triangledown _{\theta}\left (\left ( \triangledown _{\theta}\left ( D_{KL}^{v^{\pi\theta_k}}\left ( \pi_{\theta_k},\pi_{\theta^{'}} \right ) \right ) \right ) ^T \right )v=\triangledown _{\theta}\left (\left ( \triangledown _{\theta}\left ( D_{KL}^{v^{\pi\theta_k}}\left ( \pi_{\theta_k},\pi_{\theta^{'}} \right ) \right ) \right ) ^T v\right )

That is, we first take the dot product of the KL gradient with the vector v, and then differentiate that scalar again.
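As a small illustration of this double-backward trick on a toy function (not part of the book's code), one can verify the Hessian-vector product in PyTorch like this:

import torch

# toy scalar function f(w) = sum(w^4); its Hessian is diag(12 * w^2)
w = torch.tensor([1.0, 2.0, 3.0], requires_grad=True)
f = (w ** 4).sum()

grad = torch.autograd.grad(f, w, create_graph=True)[0]  # keep the graph for a 2nd derivative
v = torch.tensor([1.0, 0.0, 1.0])
grad_v = torch.dot(grad, v)                 # scalar: (grad f)^T v
hvp = torch.autograd.grad(grad_v, w)[0]     # grad of that scalar = H v

print(hvp)                       # tensor([ 12.,   0., 108.])
print(12 * w.detach() ** 2 * v)  # same values, computed explicitly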

3.5 Line Search

Because TRPO relies on first- and second-order Taylor approximations rather than an exact solution, \theta^{'} may not actually be better than \theta_k, or may violate the KL constraint. TRPO therefore performs a line search at the end of every iteration to make sure a parameter vector satisfying both conditions is found. Concretely, it finds the smallest non-negative integer i such that, with the update

\theta_{k+1}=\theta_k+\alpha^i\sqrt{\frac{2\delta }{x^THx}}x

the resulting \theta_{k+1} still satisfies the original KL constraint and actually improves the surrogate objective L_{\theta_k}, where \alpha \in (0,1) is a hyperparameter that controls how quickly the line-search step shrinks.

3.6 Generalized Advantage Estimation

We have not yet specified how to estimate the advantage function A. A commonly used method is Generalized Advantage Estimation (GAE). Let \delta _t=r_t+\gamma V(s_{t+1})-V(s_t) denote the temporal-difference error, where V is an already-learned state value function. Following the idea of multi-step temporal difference, we have:

A_t^{(1)}=\delta _t=-V(s_t)+r_t+\gamma V(s_{t+1})

A_t^{(2)}=\delta _t+\gamma \delta _{t+1}=-V(s_t)+r_t+\gamma r_{t+1}+\gamma^2 V(s_{t+2})

A_t^{(3)}=\delta _t+\gamma \delta _{t+1}+\gamma^2 \delta _{t+2}=-V(s_t)+r_t+\gamma r_{t+1}+\gamma^2 r_{t+2}+\gamma ^3 V(s_{t+3})

......

A_t^{(k)}=\sum_{l=0}^{k-1}\gamma ^{l}\delta _{t+l}=-V(s_t)+r_t+\gamma r_{t+1}+...+\gamma ^{k-1}r_{t+k-1}+\gamma ^kV(s_{t+k})

GAE then takes an exponentially weighted average of these multi-step advantage estimates:

A_t^{GAE}=(1-\lambda)\left ( A_t^{(1)}+\lambda A_t^{(2)}+\lambda ^2A_t^{(3)}+... \right )

=(1-\lambda)\left ( \delta_t+\lambda\left ( \delta_t+\gamma \delta_{t+1} \right )+\lambda^2\left ( \delta_t+\gamma \delta_{t+1}+\gamma^2 \delta_{t+2} \right )+... \right )

=(1-\lambda)\left ( \delta _t\left ( 1+\lambda +\lambda^2+... \right )+\gamma \delta _{t+1}\left ( \lambda+\lambda^2+\lambda^3 +...\right )+\gamma^2\delta_{t+2}\left ( \lambda ^2+ \lambda ^3+ \lambda ^4+... \right ) \right )

=(1-\lambda)\left ( \delta _t\frac{1}{1-\lambda}+\gamma \delta _{t+1}\frac{\lambda}{1-\lambda}+\gamma ^2\delta_{t+2}\frac{\lambda^2}{1-\lambda}+\cdots \right )

=\sum_{l=0}^{\infty }(\gamma \lambda)^l\delta_{t+l}

Here \lambda \in [0,1] is an extra hyperparameter introduced by GAE.

• When \lambda=0, A_t^{GAE}=\delta_t=r_t+\gamma V(s_{t+1})-V(s_t): the advantage is estimated from a single-step temporal difference.
• When \lambda=1, A_t^{GAE}=\sum_{l=0}^{\infty }\gamma ^l \delta_{t+l}=\sum_{l=0}^{\infty }\gamma ^l r_{t+l}-V(s_t): the advantage accumulates the temporal differences of every step. GAE can be implemented with a simple backward recursion over the TD errors:
def compute_advantage(gamma, lmbda, td_delta):
    td_delta = td_delta.detach().numpy()
    advantage_list = []
    advantage = 0.0
    for delta in td_delta[::-1]:
        advantage = gamma * lmbda * advantage + delta
        advantage_list.append(advantage)
    advantage_list.reverse()
    return torch.tensor(advantage_list, dtype=torch.float)
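A quick sanity check of this recursion on hypothetical toy numbers:

td_delta = torch.tensor([1.0, 1.0, 1.0])
print(compute_advantage(gamma=0.5, lmbda=1.0, td_delta=td_delta))
# tensor([1.7500, 1.5000, 1.0000]): working backwards, 1, then 1 + 0.5*1, then 1 + 0.5*1.5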

3.7 TRPO Implementation

Discrete action space (CartPole):

import torch
import numpy as np
import gym
import matplotlib.pyplot as plt
import torch.nn.functional as F
import rl_utils
import copy


def compute_advantage(gamma, lmbda, td_delta):
    td_delta = td_delta.detach().numpy()
    advantage_list = []
    advantage = 0.0
    for delta in td_delta[::-1]:
        advantage = gamma * lmbda * advantage + delta
        advantage_list.append(advantage)
    advantage_list.reverse()
    return torch.tensor(advantage_list, dtype=torch.float)


class PolicyNet(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim, action_dim):
        super(PolicyNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return F.softmax(self.fc2(x), dim=1)


class ValueNet(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim):
        super(ValueNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return self.fc2(x)


class TRPO:
    """ TRPO algorithm """
    def __init__(self, hidden_dim, state_space, action_space, lmbda,
                 kl_constraint, alpha, critic_lr, gamma, device):
        state_dim = state_space.shape[0]
        action_dim = action_space.n
        # the policy network's parameters are not updated by an optimizer
        self.actor = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        self.critic = ValueNet(state_dim, hidden_dim).to(device)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        self.gamma = gamma
        self.lmbda = lmbda  # GAE parameter
        self.kl_constraint = kl_constraint  # maximum KL divergence
        self.alpha = alpha  # line search parameter
        self.device = device

    def take_action(self, state):
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        probs = self.actor(state)
        action_dist = torch.distributions.Categorical(probs)
        action = action_dist.sample()
        return action.item()

    def hessian_matrix_vector_product(self, states, old_action_dists, vector):
        # compute the product of the Hessian matrix and a vector
        new_action_dists = torch.distributions.Categorical(self.actor(states))
        kl = torch.mean(
            torch.distributions.kl.kl_divergence(old_action_dists, new_action_dists))  # average KL divergence
        kl_grad = torch.autograd.grad(kl, self.actor.parameters(), create_graph=True)
        kl_grad_vector = torch.cat([grad.view(-1) for grad in kl_grad])
        # dot the KL gradient with the vector first
        kl_grad_vector_product = torch.dot(kl_grad_vector, vector)
        grad2 = torch.autograd.grad(kl_grad_vector_product, self.actor.parameters())
        grad2_vector = torch.cat([grad.view(-1) for grad in grad2])
        return grad2_vector

    def conjugate_gradient(self, grad, states, old_action_dists):  # conjugate gradient method for Hx = g
        x = torch.zeros_like(grad)
        r = grad.clone()
        p = grad.clone()
        rdotr = torch.dot(r, r)
        for i in range(10):  # main conjugate gradient loop
            Hp = self.hessian_matrix_vector_product(states, old_action_dists, p)
            alpha = rdotr / torch.dot(p, Hp)
            x += alpha * p
            r -= alpha * Hp
            new_rdotr = torch.dot(r, r)
            if new_rdotr < 1e-10:
                break
            beta = new_rdotr / rdotr
            p = r + beta * p
            rdotr = new_rdotr
        return x

    def compute_surrogate_obj(self, states, actions, advantage, old_log_probs, actor):  # surrogate policy objective
        log_probs = torch.log(actor(states).gather(1, actions))
        ratio = torch.exp(log_probs - old_log_probs)
        return torch.mean(ratio * advantage)

    def line_search(self, states, actions, advantage, old_log_probs, old_action_dists, max_vec):  # line search
        old_para = torch.nn.utils.convert_parameters.parameters_to_vector(self.actor.parameters())
        old_obj = self.compute_surrogate_obj(states, actions, advantage, old_log_probs, self.actor)
        for i in range(15):  # main line search loop
            coef = self.alpha**i
            new_para = old_para + coef * max_vec
            new_actor = copy.deepcopy(self.actor)
            torch.nn.utils.convert_parameters.vector_to_parameters(new_para, new_actor.parameters())
            new_action_dists = torch.distributions.Categorical(new_actor(states))
            kl_div = torch.mean(
                torch.distributions.kl.kl_divergence(old_action_dists, new_action_dists))
            new_obj = self.compute_surrogate_obj(states, actions, advantage, old_log_probs, new_actor)
            if new_obj > old_obj and kl_div < self.kl_constraint:
                return new_para
        return old_para

    def policy_learn(self, states, actions, old_action_dists, old_log_probs, advantage):  # update the policy
        surrogate_obj = self.compute_surrogate_obj(states, actions, advantage, old_log_probs, self.actor)
        grads = torch.autograd.grad(surrogate_obj, self.actor.parameters())
        obj_grad = torch.cat([grad.view(-1) for grad in grads]).detach()
        # use conjugate gradient to compute x = H^(-1)g
        descent_direction = self.conjugate_gradient(obj_grad, states, old_action_dists)
        Hd = self.hessian_matrix_vector_product(states, old_action_dists, descent_direction)
        max_coef = torch.sqrt(2 * self.kl_constraint / (torch.dot(descent_direction, Hd) + 1e-8))
        new_para = self.line_search(states, actions, advantage, old_log_probs,
                                    old_action_dists, descent_direction * max_coef)  # line search
        torch.nn.utils.convert_parameters.vector_to_parameters(
            new_para, self.actor.parameters())  # set the policy parameters found by line search

    def update(self, transition_dict):
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        actions = torch.tensor(transition_dict['actions']).view(-1, 1).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)
        td_target = rewards + self.gamma * self.critic(next_states) * (1 - dones)
        td_delta = td_target - self.critic(states)
        advantage = compute_advantage(self.gamma, self.lmbda, td_delta.cpu()).to(self.device)
        old_log_probs = torch.log(self.actor(states).gather(1, actions)).detach()
        old_action_dists = torch.distributions.Categorical(self.actor(states).detach())
        critic_loss = torch.mean(F.mse_loss(self.critic(states), td_target.detach()))
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()  # update the value function
        # update the policy
        self.policy_learn(states, actions, old_action_dists, old_log_probs, advantage)


num_episodes = 500
hidden_dim = 128
gamma = 0.98
lmbda = 0.95
critic_lr = 1e-2
kl_constraint = 0.0005
alpha = 0.5
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

env_name = 'CartPole-v0'
env = gym.make(env_name)
env.seed(0)
torch.manual_seed(0)
agent = TRPO(hidden_dim, env.observation_space, env.action_space, lmbda,
             kl_constraint, alpha, critic_lr, gamma, device)
return_list = rl_utils.train_on_policy_agent(env, agent, num_episodes)

episodes_list = list(range(len(return_list)))
plt.plot(episodes_list, return_list)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('TRPO on {}'.format(env_name))
plt.show()

mv_return = rl_utils.moving_average(return_list, 9)
plt.plot(episodes_list, mv_return)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('TRPO on {}'.format(env_name))
plt.show()
Iteration 0: 100%|██████████| 50/50 [00:16<00:00, 3.08it/s, episode=50, return=122.200]
Iteration 1: 100%|██████████| 50/50 [00:17<00:00, 2.84it/s, episode=100, return=130.700]
Iteration 2: 100%|██████████| 50/50 [00:26<00:00, 1.88it/s, episode=150, return=174.800]
Iteration 3: 100%|██████████| 50/50 [00:24<00:00, 2.01it/s, episode=200, return=173.300]
Iteration 4: 100%|██████████| 50/50 [00:32<00:00, 1.55it/s, episode=250, return=178.300]
Iteration 5: 100%|██████████| 50/50 [00:33<00:00, 1.49it/s, episode=300, return=178.900]
Iteration 6: 100%|██████████| 50/50 [00:28<00:00, 1.73it/s, episode=350, return=181.700]
Iteration 7: 100%|██████████| 50/50 [00:29<00:00, 1.72it/s, episode=400, return=184.500]
Iteration 8: 100%|██████████| 50/50 [00:22<00:00, 2.26it/s, episode=450, return=179.000]
Iteration 9: 100%|██████████| 50/50 [00:22<00:00, 2.20it/s, episode=500, return=188.600]

Continuous action space (Pendulum):

import torch
import numpy as np
import gym
import matplotlib.pyplot as plt
import torch.nn.functional as F
import rl_utils
import copy


def compute_advantage(gamma, lmbda, td_delta):
    td_delta = td_delta.detach().numpy()
    advantage_list = []
    advantage = 0.0
    for delta in td_delta[::-1]:
        advantage = gamma * lmbda * advantage + delta
        advantage_list.append(advantage)
    advantage_list.reverse()
    return torch.tensor(advantage_list, dtype=torch.float)


class ValueNet(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim):
        super(ValueNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return self.fc2(x)


class PolicyNetContinuous(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim, action_dim):
        super(PolicyNetContinuous, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc_mu = torch.nn.Linear(hidden_dim, action_dim)
        self.fc_std = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        mu = 2.0 * torch.tanh(self.fc_mu(x))
        std = F.softplus(self.fc_std(x))
        return mu, std  # mean and standard deviation of the Gaussian


class TRPOContinuous:
    """ TRPO for continuous actions """
    def __init__(self, hidden_dim, state_space, action_space, lmbda,
                 kl_constraint, alpha, critic_lr, gamma, device):
        state_dim = state_space.shape[0]
        action_dim = action_space.shape[0]
        self.actor = PolicyNetContinuous(state_dim, hidden_dim, action_dim).to(device)
        self.critic = ValueNet(state_dim, hidden_dim).to(device)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        self.gamma = gamma
        self.lmbda = lmbda
        self.kl_constraint = kl_constraint
        self.alpha = alpha
        self.device = device

    def take_action(self, state):
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        mu, std = self.actor(state)
        action_dist = torch.distributions.Normal(mu, std)
        action = action_dist.sample()
        return [action.item()]

    def hessian_matrix_vector_product(self, states, old_action_dists, vector, damping=0.1):
        mu, std = self.actor(states)
        new_action_dists = torch.distributions.Normal(mu, std)
        kl = torch.mean(torch.distributions.kl.kl_divergence(old_action_dists, new_action_dists))
        kl_grad = torch.autograd.grad(kl, self.actor.parameters(), create_graph=True)
        kl_grad_vector = torch.cat([grad.view(-1) for grad in kl_grad])
        kl_grad_vector_product = torch.dot(kl_grad_vector, vector)
        grad2 = torch.autograd.grad(kl_grad_vector_product, self.actor.parameters())
        grad2_vector = torch.cat([grad.contiguous().view(-1) for grad in grad2])
        return grad2_vector + damping * vector

    def conjugate_gradient(self, grad, states, old_action_dists):
        x = torch.zeros_like(grad)
        r = grad.clone()
        p = grad.clone()
        rdotr = torch.dot(r, r)
        for i in range(10):
            Hp = self.hessian_matrix_vector_product(states, old_action_dists, p)
            alpha = rdotr / torch.dot(p, Hp)
            x += alpha * p
            r -= alpha * Hp
            new_rdotr = torch.dot(r, r)
            if new_rdotr < 1e-10:
                break
            beta = new_rdotr / rdotr
            p = r + beta * p
            rdotr = new_rdotr
        return x

    def compute_surrogate_obj(self, states, actions, advantage, old_log_probs, actor):
        mu, std = actor(states)
        action_dists = torch.distributions.Normal(mu, std)
        log_probs = action_dists.log_prob(actions)
        ratio = torch.exp(log_probs - old_log_probs)
        return torch.mean(ratio * advantage)

    def line_search(self, states, actions, advantage, old_log_probs, old_action_dists, max_vec):
        old_para = torch.nn.utils.convert_parameters.parameters_to_vector(self.actor.parameters())
        old_obj = self.compute_surrogate_obj(states, actions, advantage, old_log_probs, self.actor)
        for i in range(15):
            coef = self.alpha**i
            new_para = old_para + coef * max_vec
            new_actor = copy.deepcopy(self.actor)
            torch.nn.utils.convert_parameters.vector_to_parameters(new_para, new_actor.parameters())
            mu, std = new_actor(states)
            new_action_dists = torch.distributions.Normal(mu, std)
            kl_div = torch.mean(
                torch.distributions.kl.kl_divergence(old_action_dists, new_action_dists))
            new_obj = self.compute_surrogate_obj(states, actions, advantage, old_log_probs, new_actor)
            if new_obj > old_obj and kl_div < self.kl_constraint:
                return new_para
        return old_para

    def policy_learn(self, states, actions, old_action_dists, old_log_probs, advantage):
        surrogate_obj = self.compute_surrogate_obj(states, actions, advantage, old_log_probs, self.actor)
        grads = torch.autograd.grad(surrogate_obj, self.actor.parameters())
        obj_grad = torch.cat([grad.view(-1) for grad in grads]).detach()
        descent_direction = self.conjugate_gradient(obj_grad, states, old_action_dists)
        Hd = self.hessian_matrix_vector_product(states, old_action_dists, descent_direction)
        max_coef = torch.sqrt(2 * self.kl_constraint / (torch.dot(descent_direction, Hd) + 1e-8))
        new_para = self.line_search(states, actions, advantage, old_log_probs,
                                    old_action_dists, descent_direction * max_coef)
        torch.nn.utils.convert_parameters.vector_to_parameters(new_para, self.actor.parameters())

    def update(self, transition_dict):
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        actions = torch.tensor(transition_dict['actions'], dtype=torch.float).view(-1, 1).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)
        rewards = (rewards + 8.0) / 8.0  # rescale rewards to ease training
        td_target = rewards + self.gamma * self.critic(next_states) * (1 - dones)
        td_delta = td_target - self.critic(states)
        advantage = compute_advantage(self.gamma, self.lmbda, td_delta.cpu()).to(self.device)
        mu, std = self.actor(states)
        old_action_dists = torch.distributions.Normal(mu.detach(), std.detach())
        old_log_probs = old_action_dists.log_prob(actions)
        critic_loss = torch.mean(F.mse_loss(self.critic(states), td_target.detach()))
        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        self.critic_optimizer.step()
        self.policy_learn(states, actions, old_action_dists, old_log_probs, advantage)


num_episodes = 2000
hidden_dim = 128
gamma = 0.9
lmbda = 0.9
critic_lr = 1e-2
kl_constraint = 0.00005
alpha = 0.5
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

env_name = 'Pendulum-v0'
env = gym.make(env_name)
env.seed(0)
torch.manual_seed(0)
agent = TRPOContinuous(hidden_dim, env.observation_space, env.action_space,
                       lmbda, kl_constraint, alpha, critic_lr, gamma, device)
return_list = rl_utils.train_on_policy_agent(env, agent, num_episodes)

episodes_list = list(range(len(return_list)))
plt.plot(episodes_list, return_list)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('TRPO on {}'.format(env_name))
plt.show()

mv_return = rl_utils.moving_average(return_list, 9)
plt.plot(episodes_list, mv_return)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('TRPO on {}'.format(env_name))
plt.show()
Iteration 0: 100%|██████████| 200/200 [00:23<00:00, 8.34it/s, episode=200, return=-1245.402]
Iteration 1: 100%|██████████| 200/200 [00:23<00:00, 8.57it/s, episode=400, return=-1258.636]
Iteration 2: 100%|██████████| 200/200 [00:23<00:00, 8.52it/s, episode=600, return=-1195.327]
Iteration 3: 100%|██████████| 200/200 [00:23<00:00, 8.61it/s, episode=800, return=-1109.290]
Iteration 4: 100%|██████████| 200/200 [00:23<00:00, 8.66it/s, episode=1000, return=-984.067]
Iteration 5: 100%|██████████| 200/200 [00:22<00:00, 8.76it/s, episode=1200, return=-709.332]
Iteration 6: 100%|██████████| 200/200 [00:22<00:00, 8.78it/s, episode=1400, return=-620.103]
Iteration 7: 100%|██████████| 200/200 [00:23<00:00, 8.36it/s, episode=1600, return=-426.315]
Iteration 8: 100%|██████████| 200/200 [00:23<00:00, 8.66it/s, episode=1800, return=-468.169]
Iteration 9: 100%|██████████| 200/200 [00:22<00:00, 8.78it/s, episode=2000, return=-363.595]

4. PPO

4.1 Algorithm Overview

TRPO has been applied successfully in many settings, but its computation is very complex and every update step is expensive. In 2017 an improved version, PPO, was proposed. PPO is based on TRPO's idea but is much simpler to implement: it avoids TRPO's heavy computations, and in most experiments it performs at least as well, which is why it is now commonly used as a baseline algorithm. Both TRPO and PPO are on-policy algorithms: even though their objectives involve importance sampling, they only use data from the previous round's policy rather than data from all past policies. A large body of experiments shows that PPO learns as well as TRPO (and often faster), which has made PPO a very popular reinforcement learning algorithm. If you want to try reinforcement learning in a new environment, PPO is a good first choice.

TRPO's optimization objective:

J^{TRPO}(\theta)=\underset{\theta}{max}\,\,E_{s\sim v^{\pi_{\theta_k}}}E_{a\sim \pi_{\theta_k}(\cdot|s)}\left [ \frac{\pi_{\theta}(a|s)}{\pi_{\theta_k}(a|s)}A^{\pi_{\theta_k}} (s,a)\right ]

s.t.\ \ E_{s\sim v^{\pi_{\theta_k}}}\left [ D_{KL}\left ( \pi_{\theta_k}(\cdot|s),\pi_{\theta}(\cdot|s) \right ) \right ]\leqslant \delta

TRPO solves this with Taylor approximations, conjugate gradient, line search, and so on. PPO shares the same optimization objective as TRPO but uses much simpler methods to optimize it. Specifically, PPO comes in two forms: PPO-Penalty and PPO-Clip.

PPO-Penalty: PPO-Penalty uses a Lagrange multiplier to move the KL constraint directly into the objective, turning the problem into an unconstrained optimization, and keeps adjusting the coefficient in front of the KL term during training:

arg\,\,\underset{\theta}{max}\,\,E_{s\sim v^{\pi_{\theta_k}}}E_{a\sim \pi_{\theta_k}(\cdot|s)}\left [ \frac{\pi_{\theta}(a|s)}{\pi_{\theta_k}(a|s)}A^{\pi_{\theta_k}} (s,a)-\beta D_{KL}\left [ \pi_{\theta_k}(\cdot|s),\pi_{\theta}(\cdot|s) \right ]\right ]

Let d_k=D_{KL}^{v^{\pi_{\theta_k}}}(\pi_{\theta_k},\pi_{\theta}). The coefficient \beta is updated as follows:

(1) If d_k<\delta /1.5, then \beta_{k+1}=\beta_k/2.

(2) If d_k>1.5\times \delta, then \beta_{k+1}=2\times \beta_k.

(3) Otherwise \beta_{k+1}=\beta_k.

Here \delta is a pre-set hyperparameter that limits how far the learned policy may drift from the previous round's policy.
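A minimal sketch of this adaptive-\beta rule (a hypothetical helper; the implementation below uses PPO-Clip, not PPO-Penalty):

def update_beta(beta, kl, delta):
    # Adaptive KL coefficient of PPO-Penalty.
    if kl < delta / 1.5:
        beta = beta / 2       # KL is small: relax the penalty
    elif kl > delta * 1.5:
        beta = beta * 2       # KL is large: strengthen the penalty
    return beta               # otherwise keep beta unchanged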

PPO-Clip: the other form of PPO is more direct; it clips inside the objective to ensure the new parameters do not deviate too much from the old ones:

arg\,\,\underset{\theta}{max}\,\,E_{s\sim v^{\pi_{\theta_k}}}E_{a\sim \pi_{\theta_k}(\cdot|s)}\left [min\left ( \frac{\pi_{\theta}(a|s)}{\pi_{\theta_k}(a|s)}A^{\pi_{\theta_k}} (s,a) ,clip\left ( \frac{\pi_{\theta}(a|s)}{\pi_{\theta_k}(a|s)},1-\epsilon ,1+\epsilon \right ) A^{\pi_{\theta_k}} (s,a)\right )\right ]

Here clip(x,l,r):=max(min(x,r),l), i.e. x is restricted to the interval [l,r], and \epsilon is a hyperparameter that sets the clipping range.

If A^{\pi_{\theta_k}}(s,a)>0, the action is better than average, and maximizing the objective increases the ratio \frac{\pi_{\theta}(a|s)}{\pi_{\theta_k}(a|s)}, but never beyond 1+\epsilon. Conversely, if A^{\pi_{\theta_k}}(s,a)<0, maximizing the objective decreases the ratio, but never below 1-\epsilon.
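A tiny numerical check of this clipping behaviour (toy tensors, not part of the training code), with \epsilon=0.2:

import torch

eps = 0.2
ratio = torch.tensor([1.5, 0.5, 1.1])       # pi_new / pi_old for three sampled actions
advantage = torch.tensor([1.0, 1.0, -1.0])  # the first two have positive advantage

surr1 = ratio * advantage
surr2 = torch.clamp(ratio, 1 - eps, 1 + eps) * advantage
print(torch.min(surr1, surr2))  # tensor([ 1.2000,  0.5000, -1.1000])

For the first action the incentive to increase the ratio is capped at 1+\epsilon=1.2, while for the second and third actions the more pessimistic unclipped term is kept.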

4.2 Implementation

Discrete action space:

import gym
import torch
import torch.nn.functional as F
import numpy as np
import matplotlib.pyplot as plt
import rl_utils


class PolicyNet(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim, action_dim):
        super(PolicyNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return F.softmax(self.fc2(x), dim=1)


class ValueNet(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim):
        super(ValueNet, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc2 = torch.nn.Linear(hidden_dim, 1)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        return self.fc2(x)


class PPO:
    ''' PPO algorithm with the clipped objective '''
    def __init__(self, state_dim, hidden_dim, action_dim, actor_lr, critic_lr,
                 lmbda, epochs, eps, gamma, device):
        self.actor = PolicyNet(state_dim, hidden_dim, action_dim).to(device)
        self.critic = ValueNet(state_dim, hidden_dim).to(device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        self.gamma = gamma
        self.lmbda = lmbda
        self.epochs = epochs  # number of epochs to train on one batch of trajectories
        self.eps = eps  # clipping range parameter of PPO
        self.device = device

    def take_action(self, state):
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        probs = self.actor(state)
        action_dist = torch.distributions.Categorical(probs)
        action = action_dist.sample()
        return action.item()

    def update(self, transition_dict):
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        actions = torch.tensor(transition_dict['actions']).view(-1, 1).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)
        td_target = rewards + self.gamma * self.critic(next_states) * (1 - dones)
        td_delta = td_target - self.critic(states)
        advantage = rl_utils.compute_advantage(self.gamma, self.lmbda, td_delta.cpu()).to(self.device)
        old_log_probs = torch.log(self.actor(states).gather(1, actions)).detach()
        for _ in range(self.epochs):
            log_probs = torch.log(self.actor(states).gather(1, actions))
            ratio = torch.exp(log_probs - old_log_probs)
            surr1 = ratio * advantage
            surr2 = torch.clamp(ratio, 1 - self.eps, 1 + self.eps) * advantage  # clipping
            actor_loss = torch.mean(-torch.min(surr1, surr2))  # PPO loss
            critic_loss = torch.mean(F.mse_loss(self.critic(states), td_target.detach()))
            self.actor_optimizer.zero_grad()
            self.critic_optimizer.zero_grad()
            actor_loss.backward()
            critic_loss.backward()
            self.actor_optimizer.step()
            self.critic_optimizer.step()


actor_lr = 1e-3
critic_lr = 1e-2
num_episodes = 500
hidden_dim = 128
gamma = 0.98
lmbda = 0.95
epochs = 10
eps = 0.2
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

env_name = 'CartPole-v0'
env = gym.make(env_name)
env.seed(0)
torch.manual_seed(0)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.n
agent = PPO(state_dim, hidden_dim, action_dim, actor_lr, critic_lr, lmbda,
            epochs, eps, gamma, device)
return_list = rl_utils.train_on_policy_agent(env, agent, num_episodes)

episodes_list = list(range(len(return_list)))
plt.plot(episodes_list, return_list)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('PPO on {}'.format(env_name))
plt.show()

mv_return = rl_utils.moving_average(return_list, 9)
plt.plot(episodes_list, mv_return)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('PPO on {}'.format(env_name))
plt.show()

Continuous action space:

class PolicyNetContinuous(torch.nn.Module):
    def __init__(self, state_dim, hidden_dim, action_dim):
        super(PolicyNetContinuous, self).__init__()
        self.fc1 = torch.nn.Linear(state_dim, hidden_dim)
        self.fc_mu = torch.nn.Linear(hidden_dim, action_dim)
        self.fc_std = torch.nn.Linear(hidden_dim, action_dim)

    def forward(self, x):
        x = F.relu(self.fc1(x))
        mu = 2.0 * torch.tanh(self.fc_mu(x))
        std = F.softplus(self.fc_std(x))
        return mu, std


class PPOContinuous:
    ''' PPO for continuous actions '''
    def __init__(self, state_dim, hidden_dim, action_dim, actor_lr, critic_lr,
                 lmbda, epochs, eps, gamma, device):
        self.actor = PolicyNetContinuous(state_dim, hidden_dim, action_dim).to(device)
        self.critic = ValueNet(state_dim, hidden_dim).to(device)
        self.actor_optimizer = torch.optim.Adam(self.actor.parameters(), lr=actor_lr)
        self.critic_optimizer = torch.optim.Adam(self.critic.parameters(), lr=critic_lr)
        self.gamma = gamma
        self.lmbda = lmbda
        self.epochs = epochs
        self.eps = eps
        self.device = device

    def take_action(self, state):
        state = torch.tensor([state], dtype=torch.float).to(self.device)
        mu, sigma = self.actor(state)
        action_dist = torch.distributions.Normal(mu, sigma)
        action = action_dist.sample()
        return [action.item()]

    def update(self, transition_dict):
        states = torch.tensor(transition_dict['states'], dtype=torch.float).to(self.device)
        actions = torch.tensor(transition_dict['actions'], dtype=torch.float).view(-1, 1).to(self.device)
        rewards = torch.tensor(transition_dict['rewards'], dtype=torch.float).view(-1, 1).to(self.device)
        next_states = torch.tensor(transition_dict['next_states'], dtype=torch.float).to(self.device)
        dones = torch.tensor(transition_dict['dones'], dtype=torch.float).view(-1, 1).to(self.device)
        rewards = (rewards + 8.0) / 8.0  # rescale rewards as in TRPO to ease training
        td_target = rewards + self.gamma * self.critic(next_states) * (1 - dones)
        td_delta = td_target - self.critic(states)
        advantage = rl_utils.compute_advantage(self.gamma, self.lmbda, td_delta.cpu()).to(self.device)
        mu, std = self.actor(states)
        action_dists = torch.distributions.Normal(mu.detach(), std.detach())
        # actions follow a Gaussian distribution
        old_log_probs = action_dists.log_prob(actions)
        for _ in range(self.epochs):
            mu, std = self.actor(states)
            action_dists = torch.distributions.Normal(mu, std)
            log_probs = action_dists.log_prob(actions)
            ratio = torch.exp(log_probs - old_log_probs)
            surr1 = ratio * advantage
            surr2 = torch.clamp(ratio, 1 - self.eps, 1 + self.eps) * advantage
            actor_loss = torch.mean(-torch.min(surr1, surr2))
            critic_loss = torch.mean(F.mse_loss(self.critic(states), td_target.detach()))
            self.actor_optimizer.zero_grad()
            self.critic_optimizer.zero_grad()
            actor_loss.backward()
            critic_loss.backward()
            self.actor_optimizer.step()
            self.critic_optimizer.step()


actor_lr = 1e-4
critic_lr = 5e-3
num_episodes = 2000
hidden_dim = 128
gamma = 0.9
lmbda = 0.9
epochs = 10
eps = 0.2
device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")

env_name = 'Pendulum-v0'
env = gym.make(env_name)
env.seed(0)
torch.manual_seed(0)
state_dim = env.observation_space.shape[0]
action_dim = env.action_space.shape[0]  # continuous action space
agent = PPOContinuous(state_dim, hidden_dim, action_dim, actor_lr, critic_lr,
                      lmbda, epochs, eps, gamma, device)
return_list = rl_utils.train_on_policy_agent(env, agent, num_episodes)

episodes_list = list(range(len(return_list)))
plt.plot(episodes_list, return_list)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('PPO on {}'.format(env_name))
plt.show()

mv_return = rl_utils.moving_average(return_list, 21)
plt.plot(episodes_list, mv_return)
plt.xlabel('Episodes')
plt.ylabel('Returns')
plt.title('PPO on {}'.format(env_name))
plt.show()