2022-06-22 13:00:40 +02:00
|
|
|
#!/bin/python3
|
2022-06-17 11:29:36 +02:00
|
|
|
import gym
|
2022-06-19 15:50:54 +02:00
|
|
|
from gym.envs.registration import register
|
2022-06-17 11:29:36 +02:00
|
|
|
import numpy as np
|
2022-06-22 13:00:40 +02:00
|
|
|
import os
|
2022-06-17 11:29:36 +02:00
|
|
|
import time
|
2022-06-20 23:12:42 +02:00
|
|
|
import datetime
|
2022-06-17 11:29:36 +02:00
|
|
|
|
|
|
|
from stable_baselines3 import SAC, PPO, A2C
|
|
|
|
from stable_baselines3.common.evaluation import evaluate_policy
|
2022-06-20 23:12:42 +02:00
|
|
|
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, MultiInputActorCriticPolicy
|
2022-06-17 11:29:36 +02:00
|
|
|
|
|
|
|
from sb3_trl.trl_pg import TRL_PG
|
2022-06-20 23:12:42 +02:00
|
|
|
import columbus
|
2022-06-19 15:50:54 +02:00
|
|
|
|
2022-06-22 13:00:40 +02:00
|
|
|
# Base directory used to build tensorboard-log and model-save paths below.
# Relative '.' keeps runs self-contained in the working directory;
# the os.getcwd() variant was an earlier absolute-path experiment.
#root_path = os.getcwd()
root_path = '.'
|
2022-06-17 11:29:36 +02:00
|
|
|
|
2022-06-22 13:00:40 +02:00
|
|
|
def main(env_name='ColumbusStateWithBarriers-v0'):
    """Build a PPO agent with latent gSDE exploration noise on `env_name`,
    then train/evaluate/render it via testModel.

    Earlier experiments also tried SAC and TRL_PG variants on the same
    environment; this entry point currently runs only the PPO configuration.
    """
    env = gym.make(env_name)

    # Hyperparameters kept in a dict so alternative settings are easy to swap in.
    ppo_config = dict(
        verbose=0,
        tensorboard_log=root_path+"/logs_tb/"+env_name+"/ppo_latent_sde/",
        use_sde=True,          # latent state-dependent exploration noise
        sde_sample_freq=30*15, # resample the noise matrix every 15 s at 30 fps — TODO confirm fps assumption
        #ent_coef=0.0016/1.25, #0.0032
        #vf_coef=0.00025/2, #0.0005
        #gamma=0.99, # 0.95
        #learning_rate=0.005/5 # 0.015
    )
    ppo_latent_sde = PPO("MlpPolicy", env, **ppo_config)

    print('PPO_LATENT_SDE:')
    testModel(ppo_latent_sde, 500000, showRes=True, saveModel=True, n_eval_episodes=3)
|
2022-06-17 11:29:36 +02:00
|
|
|
|
|
|
|
|
2022-06-20 23:12:42 +02:00
|
|
|
def testModel(model, timesteps=150000, showRes=False, saveModel=False, n_eval_episodes=16):
|
2022-06-17 11:29:36 +02:00
|
|
|
env = model.get_env()
|
|
|
|
model.learn(timesteps)
|
|
|
|
|
2022-06-20 23:12:42 +02:00
|
|
|
if saveModel:
|
|
|
|
now = datetime.datetime.now().strftime('%d.%m.%Y-%H:%M')
|
2022-06-22 13:00:40 +02:00
|
|
|
loc = root_path+'/models/'+model.tensorboard_log.replace(root_path+'/logs_tb/','').replace('/','_')+now+'.zip'
|
|
|
|
model.save(loc)
|
2022-06-20 23:12:42 +02:00
|
|
|
|
2022-06-19 20:34:04 +02:00
|
|
|
if n_eval_episodes:
|
|
|
|
mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=n_eval_episodes, deterministic=False)
|
|
|
|
print('Reward: '+str(round(mean_reward,3))+'±'+str(round(std_reward,2)))
|
2022-06-17 11:29:36 +02:00
|
|
|
|
|
|
|
if showRes:
|
2022-06-19 20:34:04 +02:00
|
|
|
input('<ready?>')
|
2022-06-17 11:29:36 +02:00
|
|
|
obs = env.reset()
|
|
|
|
# Evaluate the agent
|
|
|
|
episode_reward = 0
|
2022-06-22 13:12:55 +02:00
|
|
|
while True:
|
|
|
|
time.sleep(1/env.fps)
|
2022-06-17 11:29:36 +02:00
|
|
|
action, _ = model.predict(obs, deterministic=False)
|
|
|
|
obs, reward, done, info = env.step(action)
|
|
|
|
env.render()
|
|
|
|
episode_reward += reward
|
|
|
|
if done:
|
|
|
|
#print("Reward:", episode_reward)
|
|
|
|
episode_reward = 0.0
|
|
|
|
obs = env.reset()
|
|
|
|
env.reset()
|
|
|
|
|
|
|
|
# Script entry point: train and evaluate on the default environment.
if __name__=='__main__':
    main()
|