#!/bin/python3
import gym
from gym.envs.registration import register
import numpy as np
import os
import time
import datetime
from stable_baselines3 import SAC, PPO, A2C
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, MultiInputActorCriticPolicy
from sb3_trl.trl_pg import TRL_PG
import columbus
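
# Base path under which tensorboard logs (logs_tb/) and saved models (models/) are written.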
#root_path = os.getcwd()
root_path = '.'


def main(env_name='ColumbusCandyland_Aux10-v0', timesteps=500000, showRes=True, saveModel=True, n_eval_episodes=0):
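    """Set up PPO and TRL_PG learners for `env_name` and run testModel on them.

    Note: only the TRL_PG run is currently active; the PPO run is commented out.
    """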
    env = gym.make(env_name)

    test_sde = False
    ppo = PPO(
        "MlpPolicy",
        env,
        verbose=0,
        tensorboard_log=root_path+"/logs_tb/"+env_name+"/ppo/",
        learning_rate=3e-4,
        gamma=0.99,
        gae_lambda=0.95,
        normalize_advantage=True,
        ent_coef=0.1,  # 0.1
        vf_coef=0.5,
        use_sde=False,  # False
    )
    trl_pg = TRL_PG(
        "MlpPolicy",
        env,
        verbose=0,
        tensorboard_log=root_path+"/logs_tb/"+env_name+"/trl_pg/",
        learning_rate=3e-4,
        gamma=0.99,
        gae_lambda=0.95,
        normalize_advantage=True,
        ent_coef=0.1,  # 0.1
        vf_coef=0.5,
        use_sde=False,  # False
    )
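    # Variants using generalized State-Dependent Exploration (gSDE). They are
    # only constructed when test_sde is set above and are never passed to
    # testModel below.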
    if test_sde:
        ppo_latent_sde = PPO(
            "MlpPolicy",
            env,
            verbose=0,
            tensorboard_log=root_path+"/logs_tb/"+env_name+"/ppo_latent_sde/",
            learning_rate=3e-4,
            gamma=0.99,
            gae_lambda=0.95,
            normalize_advantage=True,
            ent_coef=0.15,  # 0.1
            vf_coef=0.5,
            use_sde=True,  # False
            sde_sample_freq=30*15,  # -1
        )
        trl_pg_latent_sde = TRL_PG(
            "MlpPolicy",
            env,
            verbose=0,
            tensorboard_log=root_path+"/logs_tb/"+env_name+"/trl_pg_latent_sde/",
            learning_rate=3e-4,
            gamma=0.99,
            gae_lambda=0.95,
            normalize_advantage=True,
            ent_coef=0.15,  # 0.1
            vf_coef=0.5,
            use_sde=True,  # False
            sde_sample_freq=30*15,  # -1
        )
        # sac_latent_sde = SAC(
        #     "MlpPolicy",
        #     env,
        #     verbose=0,
        #     tensorboard_log=root_path+"/logs_tb/"+env_name+"/sac_latent_sde/",
        #     use_sde=True,
        #     sde_sample_freq=30*15,
        #     ent_coef=0.0016,  # 0.0032
        #     gamma=0.99,  # 0.95
        #     learning_rate=0.001  # 0.015
        # )

    print('TRL_PG:')
    testModel(trl_pg, timesteps, showRes,
              saveModel, n_eval_episodes)

    #print('PPO:')
    #testModel(ppo, timesteps, showRes,
    #          saveModel, n_eval_episodes)


def testModel(model, timesteps, showRes=False, saveModel=False, n_eval_episodes=16):
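    """Train `model` for `timesteps`; optionally save it, evaluate it, and show a live rollout."""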
    env = model.get_env()
    model.learn(timesteps)
    if saveModel:
        now = datetime.datetime.now().strftime('%d.%m.%Y-%H:%M')
        loc = root_path+'/models/' + \
            model.tensorboard_log.replace(
                root_path+'/logs_tb/', '').replace('/', '_')+now+'.zip'
        model.save(loc)
    if n_eval_episodes:
        mean_reward, std_reward = evaluate_policy(
            model, env, n_eval_episodes=n_eval_episodes, deterministic=False)
        print('Reward: '+str(round(mean_reward, 3)) +
              '±'+str(round(std_reward, 2)))
    if showRes:
        input('<ready?>')
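        # Live rollout at ~30 FPS; runs until the process is interrupted.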
        obs = env.reset()
        # Evaluate the agent
        episode_reward = 0
        while True:
            time.sleep(1/30)
            action, _ = model.predict(obs, deterministic=False)
            obs, reward, done, info = env.step(action)
            env.render()
            episode_reward += reward
            if done:
                #print("Reward:", episode_reward)
                episode_reward = 0.0
                obs = env.reset()
    env.reset()


if __name__ == '__main__':
    main('LunarLanderContinuous-v2')
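
# A sketch of how a saved model could be reloaded later (assuming TRL_PG
# inherits stable-baselines3's BaseAlgorithm.load; the zip filename is
# timestamped by testModel, so the path below is hypothetical):
#   model = TRL_PG.load('./models/LunarLanderContinuous-v2_trl_pg<timestamp>.zip')
#   mean_reward, std_reward = evaluate_policy(
#       model, gym.make('LunarLanderContinuous-v2'), n_eval_episodes=16)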