#!/usr/bin/python3
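"""Training / comparison script for trust-region-projection RL agents.

Trains PPO (and, commented out, SAC) agents from metastable_baselines with
projection layers from metastable_projections on gym / Columbus environments,
logging to TensorBoard; trained models can be saved, evaluated and rendered
via testModel().
"""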
import gym
import time
import datetime

from stable_baselines3.common.evaluation import evaluate_policy

from metastable_baselines.distributions.distributions import get_legal_setups
from metastable_baselines.ppo import PPO
from metastable_baselines.sac import SAC
from metastable_baselines.ppo.policies import MlpPolicy as MlpPolicyPPO
from metastable_baselines.sac.policies import MlpPolicy as MlpPolicySAC
from metastable_projections.projections import BaseProjectionLayer, FrobeniusProjectionLayer, WassersteinProjectionLayer, KLProjectionLayer

import columbus  # imported for its side effect of registering the Columbus environments

from metastable_baselines.distributions import Strength, ParametrizationType, EnforcePositiveType, ProbSquashingType

import torch as th


# Root for TensorBoard logs (root_path+'/logs_tb/') and saved models (root_path+'/models/').
root_path = '.'


def main(env_name='ColumbusCandyland_Aux10-v0', timesteps=1_000_000, showRes=True, saveModel=True, n_eval_episodes=0):
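    """Train a single PPO agent with a Frobenius projection layer and pass it to testModel()."""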
    env = gym.make(env_name)
    use_sde = True
    # th.autograd.set_detect_anomaly(True)

    # Commented-out SAC counterpart to the PPO run below (only one of the two
    # projection lines should be re-enabled):
    # sac = SAC(
    #     MlpPolicySAC,
    #     env,
    #     KLProjectionLayer(trust_region_coeff=0.01),
    #     projection=WassersteinProjectionLayer(trust_region_coeff=0.01),
    #     policy_kwargs={'dist_kwargs': {'neural_strength': Strength.NONE, 'cov_strength': Strength.DIAG, 'parameterization_type':
    #                                    ParametrizationType.NONE, 'enforce_positive_type': EnforcePositiveType.ABS, 'prob_squashing_type': ProbSquashingType.NONE}},
    #     verbose=0,
    #     tensorboard_log=root_path+"/logs_tb/" +
    #     env_name+"/sac"+(['', '_sde'][use_sde])+"/",
    #     learning_rate=3e-4,
    #     gamma=0.99,
    #     gae_lambda=0.95,
    #     normalize_advantage=True,
    #     ent_coef=0.1,
    #     vf_coef=0.5,
    #     use_sde=use_sde,  # False
    #     sde_sample_freq=8,
    #     clip_range=None,  # 1  # 0.2
    # )

    trl_frob = PPO(
        MlpPolicyPPO,
        env,
        projection=FrobeniusProjectionLayer(),
        verbose=0,
        tensorboard_log=root_path+"/logs_tb/"+env_name +
        "/trl_frob"+(['', '_sde'][use_sde])+"/",
        learning_rate=3e-4,
        gamma=0.99,
        gae_lambda=0.95,
        normalize_advantage=True,
        ent_coef=0.03,  # 0.1
        vf_coef=0.5,
        use_sde=use_sde,
        # With the projection enforcing the trust region, the wide clip_range
        # (vs. PPO's usual 0.2) effectively disables PPO's own ratio clipping.
        clip_range=2,  # 0.2
    )

    # print('SAC:')
    # testModel(sac, timesteps, showRes,
    #           saveModel, n_eval_episodes)
    print('TRL_frob:')
    testModel(trl_frob, timesteps, showRes,
              saveModel, n_eval_episodes)


def full(env_name='ColumbusCandyland_Aux10-v0', timesteps=200_000, saveModel=True, n_eval_episodes=4):
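    """Sweep all legal distribution setups from get_legal_setups() and train one model per setup."""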
    env = gym.make(env_name)
    use_sde = False
    skip_num = 4  # 10 (/ start at index)
    sac = False  # False -> sweep with PPO, True -> sweep with SAC
    Model = [PPO, SAC][sac]
    Policy = [MlpPolicyPPO, MlpPolicySAC][sac]
    projection = FrobeniusProjectionLayer()
    # projection = BaseProjectionLayer()

    gen = enumerate(get_legal_setups(
        allowedEPTs=[EnforcePositiveType.SOFTPLUS, EnforcePositiveType.ABS]))
    # Skip the first skip_num setups (e.g. when resuming an aborted sweep).
    for i in range(skip_num):
        next(gen)
    for i, setup in gen:
        (ps, cs, ept, pt) = setup
        print('{'+str(i)+'}: '+str(setup))
        model = Model(
            Policy,
            env,
            projection=projection,
            policy_kwargs={'dist_kwargs': {'neural_strength': ps, 'cov_strength': cs, 'parameterization_type':
                                           pt, 'enforce_positive_type': ept, 'prob_squashing_type': ProbSquashingType.TANH}},
            verbose=0,
            tensorboard_log=root_path+"/logs_tb/" +
            env_name+"/"+['ppo', 'sac'][sac]+"_"+'TANH_' +
            ("_".join([str(s) for s in setup])+['', '_sde'][use_sde])+"/",
            learning_rate=3e-4,
            gamma=0.99,
            gae_lambda=0.95,
            normalize_advantage=True,
            ent_coef=0.02,  # 0.1
            vf_coef=0.5,
            use_sde=use_sde,  # False
            clip_range=1,  # 0.2
        )

        testModel(model, timesteps, False,
                  saveModel, n_eval_episodes)


def testModel(model, timesteps, showRes=False, saveModel=False, n_eval_episodes=16):
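    """Train model, then optionally save it, evaluate it, and render live rollouts."""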
    env = model.get_env()
    try:
        model.learn(timesteps)
    except KeyboardInterrupt:
        print('[!] Training Terminated')
    if saveModel:
        now = datetime.datetime.now().strftime('%d.%m.%Y-%H:%M')
        # Derive the archive name from the TensorBoard run directory.
        loc = root_path+'/models/' + \
            model.tensorboard_log.replace(
                root_path+'/logs_tb/', '').replace('/', '_')+now+'.zip'
        model.save(loc)
    if n_eval_episodes:
        mean_reward, std_reward = evaluate_policy(
            model, env, n_eval_episodes=n_eval_episodes, deterministic=False)
        print('Reward: '+str(round(mean_reward, 3)) +
              '±'+str(round(std_reward, 2)))
    if showRes:
        input('<ready?>')
        obs = env.reset()
        # Watch the agent live; the loop runs until the process is interrupted.
        episode_reward = 0
        while True:
            time.sleep(1/30)  # ~30 fps rendering
            action, _ = model.predict(obs, deterministic=False)
            obs, reward, done, info = env.step(action)
            env.render()
            episode_reward += reward
            if done:
                # print("Reward:", episode_reward)
                episode_reward = 0.0
                obs = env.reset()
    env.reset()


if __name__ == '__main__':
    main('LunarLanderContinuous-v2')
    # main('ColumbusJustState-v0')
    # main('ColumbusStateWithBarriers-v0')
    # full('ColumbusEasierObstacles-v0')
    # main('ColumbusSingle-v0')
    # full('LunarLanderContinuous-v2')
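    # Sketch: a saved run can be restored later via the standard
    # stable-baselines3 API (assuming metastable_baselines keeps it); the
    # exact filename depends on the run name and save time:
    # model = PPO.load(root_path+'/models/<run>_<timestamp>.zip')
    # print(evaluate_policy(model, gym.make('LunarLanderContinuous-v2'),
    #                       n_eval_episodes=16))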