ptan.agent.DQNAgent and ptan.agent.PolicyAgent are ready-made agent wrappers in the ptan library: you pass in a neural network and an action selector, and they return the indices of the chosen actions, which is very convenient.
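Conceptually, DQNAgent simply applies the network to a batch of observations and hands the resulting per-action scores to the selector, which picks one action index per row. Below is a minimal sketch of that idea (illustrative only, not ptan's actual implementation; the helper name is made up):

import torch

def dqn_agent_sketch(net, selector, states):
    # illustrative only: run the network, then let the selector choose action indices
    with torch.no_grad():
        q_values = net(states).cpu().numpy()   # per-action scores, shape (batch_size, actions)
    actions = selector(q_values)               # one action index per batch row
    agent_states = [None] * len(actions)       # stateless agent: nothing to remember between calls
    return actions, agent_states

The real agents follow the same pattern, which is why the return value in the example below is a pair of (action indices, list of internal agent states).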
Code:
import ptan
import torch
import torch.nn as nn
# Custom NN model for DQN
class DQNNet(nn.Module):
    def __init__(self, actions: int):
        super(DQNNet, self).__init__()
        self.actions = actions

    def forward(self, x):
        # we always produce a diagonal tensor of shape (batch_size, actions)
        return torch.eye(x.size()[0], self.actions)
# Custom policy network: its output is a set of per-action scores (logits)
# that will be mapped to a probability distribution over actions
class PolicyNet(nn.Module):
    def __init__(self, actions: int):
        super(PolicyNet, self).__init__()
        self.actions = actions

    def forward(self, x):
        # produce a tensor where the first two actions
        # have the same logit scores
        shape = (x.size()[0], self.actions)
        res = torch.zeros(shape, dtype=torch.float32)
        res[:, 0] = 1
        res[:, 1] = 1
        return res
net = DQNNet(actions=3)  # instantiate the DQN network
net_out = net(torch.zeros(6, 10))
print("DQNNet:")
print(net_out)
selector = ptan.actions.ArgmaxActionSelector()  # define the action selector
agent = ptan.agent.DQNAgent(dqn_model=net, action_selector=selector)  # create the agent from the NN and the action selector
ag_out = agent(torch.zeros(2, 5))  # the agent takes a batch of observations: 2 is the batch size, 5 is the observation dimension
print("Argmax:", ag_out)  # the first element is the action indices, the second is a list of the agent's internal states
# epsilon-greedy selector: with probability epsilon a random action is chosen,
# with probability 1 - epsilon the argmax of the NN output is used
selector = ptan.actions.EpsilonGreedyActionSelector(epsilon=1.0)
agent = ptan.agent.DQNAgent(dqn_model=net, action_selector=selector)
ag_out = agent(torch.zeros(10, 5))[0]
print("eps=1.0:", ag_out)
selector.epsilon = 0.5
ag_out = agent(torch.zeros(10, 5))[0]
print("eps=0.5:", ag_out)
selector.epsilon = 0.1
ag_out = agent(torch.zeros(10, 5))[0]
print("eps=0.1:", ag_out)
net = PolicyNet(actions=5)  # instantiate the policy network
net_out = net(torch.zeros(6, 10))
print("policy_net:")
print(net_out)
selector = ptan.actions.ProbabilityActionSelector()  # selector that samples actions from a probability distribution
# apply_softmax=True means softmax does not have to be applied inside PolicyNet, but it must be applied somewhere:
# the NN output may contain negative values, so softmax is needed to turn it into a non-negative, normalized probability distribution
agent = ptan.agent.PolicyAgent(model=net, action_selector=selector, apply_softmax=True)
ag_out = agent(torch.zeros(6, 5))[0]
print(ag_out)
Output:
DQNNet:
tensor([[1., 0., 0.],
        [0., 1., 0.],
        [0., 0., 1.],
        [0., 0., 0.],
        [0., 0., 0.],
        [0., 0., 0.]])
Argmax: (array([0, 1], dtype=int64), [None, None])
eps=1.0: [1 2 0 0 2 2 2 2 0 0]
eps=0.5: [0 0 0 0 0 1 0 0 1 0]
eps=0.1: [0 1 2 0 0 0 0 1 0 0]
policy_net:
tensor([[1., 1., 0., 0., 0.],
        [1., 1., 0., 0., 0.],
        [1., 1., 0., 0., 0.],
        [1., 1., 0., 0., 0.],
        [1., 1., 0., 0., 0.],
        [1., 1., 0., 0., 0.]])
[1 0 4 3 1 0]
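The two selectors exercised above are easy to reason about with a few lines of plain numpy. The sketch below only illustrates the behavior described in the code comments (a random action with probability epsilon, and sampling from the softmaxed distribution); it is not ptan's actual implementation, and the function names are made up:

import numpy as np

def epsilon_greedy_sketch(q_values: np.ndarray, epsilon: float) -> np.ndarray:
    # greedy (argmax) choice for every row of the batch
    actions = q_values.argmax(axis=1)
    # with probability epsilon, replace the greedy choice with a uniformly random action
    batch_size, n_actions = q_values.shape
    mask = np.random.random(batch_size) < epsilon
    actions[mask] = np.random.choice(n_actions, size=mask.sum())
    return actions

def probability_sketch(logits: np.ndarray) -> np.ndarray:
    # softmax turns arbitrary (possibly negative) scores into a valid probability distribution
    exp = np.exp(logits - logits.max(axis=1, keepdims=True))
    probs = exp / exp.sum(axis=1, keepdims=True)
    # sample one action per batch row from its own distribution
    return np.array([np.random.choice(len(p), p=p) for p in probs])

With the PolicyNet above, the first two actions get a logit of 1 and the rest 0, so after the softmax the first two actions are about 2.7 times (a factor of e) as likely to be sampled as the other three, which is consistent with the sampled indices printed above.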