From 4080ad8135c45c0c010df2a11f3f0803a8cdad31 Mon Sep 17 00:00:00 2001
From: Dominik Roth
Date: Sun, 28 Aug 2022 12:07:19 +0200
Subject: [PATCH] Removed old TODOs

---
 metastable_baselines/distributions/distributions.py |  3 ---
 metastable_baselines/ppo/policies.py                 |  2 --
 test.py                                              | 11 +++++++----
 3 files changed, 7 insertions(+), 9 deletions(-)

diff --git a/metastable_baselines/distributions/distributions.py b/metastable_baselines/distributions/distributions.py
index 06e6807..5fbac7a 100644
--- a/metastable_baselines/distributions/distributions.py
+++ b/metastable_baselines/distributions/distributions.py
@@ -200,7 +200,6 @@ class UniversalGaussianDistribution(SB3_Distribution):
 
         assert std_init >= 0.0, "std can not be initialized to a negative value."
 
-        # TODO: Implement SDE
         self.latent_sde_dim = latent_sde_dim
 
         mean_actions = nn.Linear(latent_dim, self.action_dim)
@@ -348,7 +347,6 @@ class UniversalGaussianDistribution(SB3_Distribution):
 
     def get_noise(self, latent_sde: th.Tensor) -> th.Tensor:
         latent_sde = latent_sde if self.learn_features else latent_sde.detach()
-        # # TODO: Good idea?
         latent_sde = th.nn.functional.normalize(latent_sde, dim=-1)
         # Default case: only one exploration matrix
         if len(latent_sde) == 1 or len(latent_sde) != len(self.exploration_matrices):
@@ -579,7 +577,6 @@ class CholNet(nn.Module):
                               dim2=-1)).diag_embed() + chol.triu(1)
 
     def string(self):
-        # TODO
         return ''
 
 
diff --git a/metastable_baselines/ppo/policies.py b/metastable_baselines/ppo/policies.py
index 4f66ee2..6377eb9 100644
--- a/metastable_baselines/ppo/policies.py
+++ b/metastable_baselines/ppo/policies.py
@@ -79,8 +79,6 @@ class ActorCriticPolicy(BasePolicy):
         excluding the learning rate, to pass to the optimizer
     """
 
-    # TODO: Allow passing of dist_kwargs into dist
-
     def __init__(
         self,
         observation_space: gym.spaces.Space,
diff --git a/test.py b/test.py
index 05f8472..70f53d6 100755
--- a/test.py
+++ b/test.py
@@ -15,18 +15,21 @@ import columbus
 from metastable_baselines.distributions import Strength, ParametrizationType, EnforcePositiveType, ProbSquashingType
 
+import torch as th
+
 root_path = '.'
 
 
 def main(env_name='ColumbusCandyland_Aux10-v0', timesteps=1_000_000, showRes=True, saveModel=True, n_eval_episodes=0):
     env = gym.make(env_name)
 
     use_sde = False
 
+    # th.autograd.set_detect_anomaly(True)
     ppo = PPO(
         MlpPolicyPPO,
         env,
-        projection=BaseProjectionLayer(),  # KLProjectionLayer(trust_region_coeff=0.01),
-        policy_kwargs={'dist_kwargs': {'neural_strength': Strength.NONE, 'cov_strength': Strength.DIAG, 'parameterization_type':
-                                       ParametrizationType.NONE, 'enforce_positive_type': EnforcePositiveType.ABS, 'prob_squashing_type': ProbSquashingType.NONE}},
+        projection=BaseProjectionLayer(),  # KLProjectionLayer(trust_region_coeff=0.01),
+        policy_kwargs={'dist_kwargs': {'neural_strength': Strength.NONE, 'cov_strength': Strength.FULL, 'parameterization_type':
+                                       ParametrizationType.CHOL, 'enforce_positive_type': EnforcePositiveType.ABS, 'prob_squashing_type': ProbSquashingType.NONE}},
         verbose=0,
         tensorboard_log=root_path+"/logs_tb/" + env_name+"/ppo"+(['', '_sde'][use_sde])+"/",
@@ -37,7 +40,7 @@ def main(env_name='ColumbusCandyland_Aux10-v0', timesteps=1_000_000, showRes=Tru
         ent_coef=0.1,  # 0.1
         vf_coef=0.5,
         use_sde=use_sde,  # False
-        clip_range=0.2  # 1 # 0.2,
+        clip_range=None  # 1 # 0.2,
     )
     # trl_frob = PPO(
     #     MlpPolicy,
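
Note (reviewer sketch, not part of the patch): besides deleting the stale TODOs, the test.py hunks switch the policy's Gaussian from a diagonal covariance (Strength.DIAG / ParametrizationType.NONE) to a full covariance matrix parameterized through its Cholesky factor (Strength.FULL / ParametrizationType.CHOL), and drop the fixed PPO clip range (clip_range=None), presumably leaving the projection layer to constrain policy updates instead. A minimal standalone sketch of the resulting configuration follows. The import paths for PPO, MlpPolicyPPO, and BaseProjectionLayer are assumptions (test.py's own imports sit above the hunk shown here and are authoritative), learn() is assumed to mirror the stable-baselines3 signature, and running this requires the columbus environments to be installed.

    import gym
    import columbus  # noqa: F401 -- assumed to register the Columbus* envs on import
    from metastable_baselines.ppo import PPO, MlpPolicyPPO            # assumed import path
    from metastable_baselines.projections import BaseProjectionLayer  # assumed import path
    from metastable_baselines.distributions import (
        Strength, ParametrizationType, EnforcePositiveType, ProbSquashingType)

    env = gym.make('ColumbusCandyland_Aux10-v0')
    ppo = PPO(
        MlpPolicyPPO,
        env,
        projection=BaseProjectionLayer(),  # or e.g. KLProjectionLayer(trust_region_coeff=0.01)
        policy_kwargs={'dist_kwargs': {
            'neural_strength': Strength.NONE,
            'cov_strength': Strength.FULL,                      # full covariance matrix
            'parameterization_type': ParametrizationType.CHOL,  # parameterized via its Cholesky factor
            'enforce_positive_type': EnforcePositiveType.ABS,
            'prob_squashing_type': ProbSquashingType.NONE,
        }},
        use_sde=False,
        clip_range=None,  # no fixed PPO ratio clipping, as in the patched test.py
    )
    ppo.learn(total_timesteps=1_000_000)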