expanding automatic testing

Dominik Moritz Roth 2022-06-25 14:50:19 +02:00
parent 25316ec0b8
commit 1a49a412c0

test.py

@@ -16,21 +16,66 @@ import columbus
 #root_path = os.getcwd()
 root_path = '.'
 
-def main(env_name='ColumbusStateWithBarriers-v0'):
+
+def main(env_name='ColumbusCandyland_Aux10-v0', timesteps=5000, showRes=False, saveModel=True, n_eval_episodes=8):
     env = gym.make(env_name)
-    ppo_latent_sde = PPO(
+    test_sde = False
+    ppo = PPO(
         "MlpPolicy",
         env,
         verbose=0,
-        tensorboard_log=root_path+"/logs_tb/"+env_name+"/ppo_latent_sde/",
-        use_sde=True,
-        sde_sample_freq=30*15,
-        #ent_coef=0.0016/1.25, #0.0032
-        #vf_coef=0.00025/2, #0.0005
-        #gamma=0.99, # 0.95
-        #learning_rate=0.005/5 # 0.015
+        tensorboard_log=root_path+"/logs_tb/"+env_name+"/ppo/",
+        learning_rate=3e-4,
+        gamma=0.99,
+        gae_lambda=0.95,
+        normalize_advantage=True,
+        ent_coef=0.15, # 0.1
+        vf_coef=0.5,
+        use_sde=False, # False
     )
-    #sac_latent_sde = SAC(
+    trl_pg = TRL_PG(
+        "MlpPolicy",
+        env,
+        verbose=0,
+        tensorboard_log=root_path+"/logs_tb/"+env_name+"/trl_pg/",
+        learning_rate=3e-4,
+        gamma=0.99,
+        gae_lambda=0.95,
+        normalize_advantage=True,
+        ent_coef=0.15, # 0.1
+        vf_coef=0.5,
+        use_sde=False, # False
+    )
+    if test_sde:
+        ppo_latent_sde = PPO(
+            "MlpPolicy",
+            env,
+            verbose=0,
+            tensorboard_log=root_path+"/logs_tb/"+env_name+"/ppo_latent_sde/",
+            learning_rate=3e-4,
+            gamma=0.99,
+            gae_lambda=0.95,
+            normalize_advantage=True,
+            ent_coef=0.15, # 0.1
+            vf_coef=0.5,
+            use_sde=True, # False
+            sde_sample_freq=30*15, # -1
+        )
+        trl_pg_latent_sde = TRL_PG(
+            "MlpPolicy",
+            env,
+            verbose=0,
+            tensorboard_log=root_path+"/logs_tb/"+env_name+"/trl_pg_latent_sde/",
+            learning_rate=3e-4,
+            gamma=0.99,
+            gae_lambda=0.95,
+            normalize_advantage=True,
+            ent_coef=0.15, # 0.1
+            vf_coef=0.5,
+            use_sde=True, # False
+            sde_sample_freq=30*15, # -1
+        )
+    # sac_latent_sde = SAC(
     #    "MlpPolicy",
     #    env,
     #    verbose=0,
@@ -40,34 +85,32 @@ def main(env_name='ColumbusStateWithBarriers-v0'):
     #    ent_coef=0.0016, #0.0032
     #    gamma=0.99, # 0.95
     #    learning_rate=0.001 # 0.015
-    #)
-    #trl = TRL_PG(
-    #    "MlpPolicy",
-    #    env,
-    #    verbose=0,
-    #    tensorboard_log="./logs_tb/"+env_name+"/trl_pg/",
-    #)
+    # )
 
-    print('PPO_LATENT_SDE:')
-    testModel(ppo_latent_sde, 500000, showRes = True, saveModel=True, n_eval_episodes=3)
-    #print('SAC_LATENT_SDE:')
-    #testModel(sac_latent_sde, 250000, showRes = True, saveModel=True, n_eval_episodes=0)
-    #print('TRL_PG:')
-    #testModel(trl)
+    print('PPO:')
+    testModel(ppo, timesteps, showRes,
+              saveModel, n_eval_episodes)
+    print('TRL_PG:')
+    testModel(trl_pg, timesteps, showRes,
+              saveModel, n_eval_episodes)
 
 
-def testModel(model, timesteps=150000, showRes=False, saveModel=False, n_eval_episodes=16):
+def testModel(model, timesteps, showRes=False, saveModel=False, n_eval_episodes=16):
     env = model.get_env()
     model.learn(timesteps)
 
     if saveModel:
         now = datetime.datetime.now().strftime('%d.%m.%Y-%H:%M')
-        loc = root_path+'/models/'+model.tensorboard_log.replace(root_path+'/logs_tb/','').replace('/','_')+now+'.zip'
+        loc = root_path+'/models/' + \
+            model.tensorboard_log.replace(
+                root_path+'/logs_tb/', '').replace('/', '_')+now+'.zip'
         model.save(loc)
 
     if n_eval_episodes:
-        mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=n_eval_episodes, deterministic=False)
-        print('Reward: '+str(round(mean_reward,3))+'±'+str(round(std_reward,2)))
+        mean_reward, std_reward = evaluate_policy(
+            model, env, n_eval_episodes=n_eval_episodes, deterministic=False)
+        print('Reward: '+str(round(mean_reward, 3)) +
+              '±'+str(round(std_reward, 2)))
 
     if showRes:
         input('<ready?>')
@@ -75,7 +118,7 @@ def testModel(model, timesteps=150000, showRes=False, saveModel=False, n_eval_ep
         # Evaluate the agent
         episode_reward = 0
         while True:
-            time.sleep(1/env.fps)
+            time.sleep(1/30)
             action, _ = model.predict(obs, deterministic=False)
             obs, reward, done, info = env.step(action)
             env.render()
@@ -86,5 +129,6 @@ def testModel(model, timesteps=150000, showRes=False, saveModel=False, n_eval_ep
                 obs = env.reset()
     env.reset()
 
-if __name__=='__main__':
-    main()
+
+if __name__ == '__main__':
+    main('CartPole-v1')