Trying to converge on simple columbus envs
This commit is contained in:
parent b9303416ac
commit e71735bf79
80
test.py
@@ -2,91 +2,61 @@ import gym
from gym.envs.registration import register
import numpy as np
import time
import datetime

from stable_baselines3 import SAC, PPO, A2C
from stable_baselines3.common.evaluation import evaluate_policy
from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, MultiInputActorCriticPolicy

from sb3_trl.trl_pg import TRL_PG
from columbus import env
import columbus


register(
    id='ColumbusTestRay-v0',
    entry_point=env.ColumbusTestRay,
    max_episode_steps=30*60*5,
)

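# (Sketch, not part of the original diff: the 'ColumbusEasyObstacles-v0' id used by
#  main() below is not registered in this file; presumably `import columbus` registers
#  it, or it would be registered analogously, with a hypothetical entry point:
#
#      register(
#          id='ColumbusEasyObstacles-v0',
#          entry_point=env.ColumbusEasyObstacles,
#          max_episode_steps=30*60*5,
#      )
# )
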
def main():
    #env = gym.make("LunarLander-v2")
    env = gym.make("ColumbusTestRay-v0")

    ppo = PPO(
        "MlpPolicy",
        env,
        verbose=1,
        tensorboard_log="./logs_tb/test/ppo",
        use_sde=False,
        ent_coef=0.0001,
        learning_rate=0.0004
    )
    ppo_base_sde = PPO(
        "MlpPolicy",
        env,
        verbose=1,
        tensorboard_log="./logs_tb/test/ppo_base_sde/",
        use_sde=True,
        sde_sample_freq=30*20,
        sde_net_arch=[],
        ent_coef=0.000001,
        learning_rate=0.0003
    )
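# (Note, added for clarity: use_sde enables SB3's generalized State-Dependent
#  Exploration (gSDE); sde_sample_freq resamples the exploration noise matrix every
#  n env steps (30*20 here). sde_net_arch is a gSDE option that was deprecated in
#  later SB3 releases.)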
def main(env_name='ColumbusEasyObstacles-v0'):
    env = gym.make(env_name)
    ppo_latent_sde = PPO(
        "MlpPolicy",
        env,
        verbose=1,
        tensorboard_log="./logs_tb/test/ppo_latent_sde/",
        tensorboard_log="./logs_tb/"+env_name+"/ppo_latent_sde/",
        use_sde=True,
        sde_sample_freq=30*20,
        ent_coef=0.000001,
        learning_rate=0.0003
    )
    a2c = A2C(
        "MlpPolicy",
        env,
        verbose=1,
        tensorboard_log="./logs_tb/test/a2c/",
    )
    trl = TRL_PG(
        "MlpPolicy",
        env,
        verbose=0,
        tensorboard_log="./logs_tb/test/trl_pg/",
        sde_sample_freq=30*15,
        ent_coef=0.0032,
        vf_coef=0.0005,
        gamma=0.95,
        learning_rate=0.02
    )
    #trl = TRL_PG(
    #    "MlpPolicy",
    #    env,
    #    verbose=0,
    #    tensorboard_log="./logs_tb/"+env_name+"/trl_pg/",
    #)

    #print('PPO:')
    #testModel(ppo, 500000, showRes = True, saveModel=True, n_eval_episodes=4)
    print('PPO_BASE_SDE:')
    testModel(ppo_base_sde, 200000, showRes = True, saveModel=True, n_eval_episodes=0)
    #print('A2C:')
    #testModel(a2c, showRes = True)
    print('PPO_LATENT_SDE:')
    testModel(ppo_latent_sde, 100000, showRes = True, saveModel=True, n_eval_episodes=0)
    #print('TRL_PG:')
    #testModel(trl)


def testModel(model, timesteps=100000, showRes=False, saveModel=False, n_eval_episodes=16):
def testModel(model, timesteps=150000, showRes=False, saveModel=False, n_eval_episodes=16):
    env = model.get_env()
    model.learn(timesteps)

    if saveModel:
        now = datetime.datetime.now().strftime('%d.%m.%Y-%H:%M')
        model.save(model.tensorboard_log.replace('./logs_tb/','').replace('/','_')+now+'.zip')
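        # (Descriptive note, added: the save name is derived from the tensorboard_log
        #  path, e.g. "./logs_tb/test/ppo_latent_sde/" becomes
        #  "test_ppo_latent_sde_<dd.mm.YYYY-HH:MM>.zip" in the current working directory.)
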
    if n_eval_episodes:
        mean_reward, std_reward = evaluate_policy(model, env, n_eval_episodes=n_eval_episodes, deterministic=False)
        print('Reward: '+str(round(mean_reward,3))+'±'+str(round(std_reward,2)))

    if showRes:
        model.save("model")
        input('<ready?>')
        obs = env.reset()
        # Evaluate the agent
        episode_reward = 0
        for _ in range(1000):
        for _ in range(30*60*5):
            time.sleep(1/30)
            action, _ = model.predict(obs, deterministic=False)
            obs, reward, done, info = env.step(action)
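
# (Note, not part of the original diff: the tail of this loop - e.g. rendering and
#  done handling - and the script entry point lie outside this hunk. The script is
#  presumably run directly via something like:
#
#      if __name__ == '__main__':
#          main()
# )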