
zhangyibing/PPO-Pyorch

forked from zhou_leo/PPO-Pyorch 
This repository has not declared an open-source license file (LICENSE). Before using it, check the project description and the upstream dependencies of its code.
test_continuous.py 1.98 KB
Leo committed on 2023-06-03 16:35 · start
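The script below loads a pre-trained PPO agent (it expects PPO_continuous.py on the path and a checkpoint in ./preTrained/) and rolls it out for a few episodes on BipedalWalker-v2, optionally rendering the environment or dumping per-step frames.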
import gym
from PPO_continuous import PPO, Memory
from PIL import Image
import torch

device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")

def test():
    ############## Hyperparameters ##############
    env_name = "BipedalWalker-v2"
    env = gym.make(env_name)
    state_dim = env.observation_space.shape[0]
    action_dim = env.action_space.shape[0]
    n_episodes = 3          # num of episodes to run
    max_timesteps = 1500    # max timesteps in one episode
    render = True           # render the environment
    save_gif = False        # frames are saved as .jpg images in the ./gif folder

    # filename and directory to load model from
    filename = "PPO_continuous_" + env_name + ".pth"
    directory = "./preTrained/"

    action_std = 0.5        # constant std for action distribution (Multivariate Normal)
    K_epochs = 80           # update policy for K epochs
    eps_clip = 0.2          # clip parameter for PPO
    gamma = 0.99            # discount factor

    lr = 0.0003             # parameters for Adam optimizer
    betas = (0.9, 0.999)
    #############################################

    memory = Memory()
    ppo = PPO(state_dim, action_dim, action_std, lr, betas, gamma, K_epochs, eps_clip)
    # map_location lets a checkpoint trained on GPU load on a CPU-only machine
    ppo.policy_old.load_state_dict(torch.load(directory + filename, map_location=device))

    for ep in range(1, n_episodes + 1):
        ep_reward = 0
        state = env.reset()
        for t in range(max_timesteps):
            action = ppo.select_action(state, memory)
            state, reward, done, _ = env.step(action)
            ep_reward += reward
            if render:
                env.render()
            if save_gif:
                img = env.render(mode='rgb_array')
                img = Image.fromarray(img)
                img.save('./gif/{}.jpg'.format(t))
            if done:
                break
        print('Episode: {}\tReward: {}'.format(ep, int(ep_reward)))
        ep_reward = 0
    env.close()

if __name__ == '__main__':
    test()
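Note that this script targets the legacy gym API: env.reset() returns only the observation, env.step() returns a 4-tuple, and the BipedalWalker-v2 id is used, which has since been removed. Below is a minimal sketch (not part of the original repo) of how the same rollout loop would look under the newer Gymnasium API, assuming the gymnasium package and the BipedalWalker-v3 id; the trained agent is stubbed out with a random policy here.

import gymnasium as gym

env = gym.make("BipedalWalker-v3", render_mode="human")
state, info = env.reset()                  # reset() now returns (obs, info)
for t in range(1500):
    # stand-in for ppo.select_action(state, memory); a real run would load
    # the trained policy exactly as in the script above
    action = env.action_space.sample()
    state, reward, terminated, truncated, info = env.step(action)
    if terminated or truncated:            # the old `done` flag is split in two
        break
env.close()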