From 7861821d0d463b27b05e9f4be3812aa27baa235f Mon Sep 17 00:00:00 2001
From: Dominik Roth
Date: Sun, 2 Jun 2024 14:14:12 +0200
Subject: [PATCH] Worked on TRPL module

---
 fancy_rl/algos/trpl.py | 75 +++++++++++++++++------------------------
 1 file changed, 30 insertions(+), 45 deletions(-)

diff --git a/fancy_rl/algos/trpl.py b/fancy_rl/algos/trpl.py
index 0f297c5..6a04b82 100644
--- a/fancy_rl/algos/trpl.py
+++ b/fancy_rl/algos/trpl.py
@@ -1,11 +1,11 @@
 import torch
-from torchrl.modules import ActorValueOperator, ProbabilisticActor
+from torchrl.modules import ProbabilisticActor
 from torchrl.objectives.value.advantages import GAE
 from fancy_rl.algos.on_policy import OnPolicy
-from fancy_rl.policy import Actor, Critic, SharedModule
+from fancy_rl.policy import Actor, Critic
 from fancy_rl.objectives import TRPLLoss
 
 class TRPL(OnPolicy):
     def __init__(
         self,
         env_spec,
@@ -14,7 +14,6 @@ class TRPL(OnPolicy):
         critic_hidden_sizes=[64, 64],
         actor_activation_fn="Tanh",
         critic_activation_fn="Tanh",
-        shared_stem_sizes=[64],
         proj_layer_type=None,
         learning_rate=3e-4,
         n_steps=2048,
@@ -28,14 +27,16 @@
         entropy_coef=0.01,
         critic_coef=0.5,
         trust_region_coef=10.0,
-        normalize_advantage=True,
+        normalize_advantage=False,
         device=None,
         env_spec_eval=None,
         eval_episodes=10,
     ):
         device = device or torch.device("cuda" if torch.cuda.is_available() else "cpu")
+        self.device = device
 
-        self.trust_region_layer = None # from proj_layer_type
+        self.trust_region_layer = None # TODO: from proj_layer_type
+        self.trust_region_coef = trust_region_coef
 
         # Initialize environment to get observation and action space sizes
         self.env_spec = env_spec
@@ -43,55 +44,23 @@
         obs_space = env.observation_space
         act_space = env.action_space
 
-        # Define the shared, actor, and critic modules
-        self.shared_module = SharedModule(obs_space, shared_stem_sizes, actor_activation_fn, device)
-        self.raw_actor = Actor(self.shared_module, act_space, actor_hidden_sizes, actor_activation_fn, device)
-        self.critic = Critic(self.shared_module, critic_hidden_sizes, critic_activation_fn, device)
-
-        # Perfrom projection
-        self.actor = self.raw_actor # TODO: Project
-
-        # Combine into an ActorValueOperator
-        self.ac_module = ActorValueOperator(
-            self.shared_module,
-            self.actor,
-            self.critic
-        )
-
-        # Define the policy as a ProbabilisticActor
-        policy = ProbabilisticActor(
-            module=self.ac_module.get_policy_operator(),
+        self.critic = Critic(obs_space, critic_hidden_sizes, critic_activation_fn, device)
+        actor_net = Actor(obs_space, act_space, actor_hidden_sizes, actor_activation_fn, device)
+        raw_actor = ProbabilisticActor(
+            module=actor_net,
             in_keys=["loc", "scale"],
             out_keys=["action"],
             distribution_class=torch.distributions.Normal,
             return_log_prob=True
         )
+        self.actor = raw_actor # TODO: Proj here
 
         optimizers = {
             "actor": torch.optim.Adam(self.actor.parameters(), lr=learning_rate),
             "critic": torch.optim.Adam(self.critic.parameters(), lr=learning_rate)
         }
 
-        self.adv_module = GAE(
-            gamma=self.gamma,
-            lmbda=self.gae_lambda,
-            value_network=self.critic,
-            average_gae=False,
-        )
-
-        self.loss_module = TRPLLoss(
-            actor_network=self.actor,
-            critic_network=self.critic,
-            trust_region_layer=self.trust_region_layer,
-            loss_critic_type='MSELoss',
-            entropy_coef=self.entropy_coef,
-            critic_coef=self.critic_coef,
-            trust_region_coef=self.trust_region_coef,
-            normalize_advantage=self.normalize_advantage,
-        )
-
         super().__init__(
-            policy=policy,
             env_spec=env_spec,
             loggers=loggers,
             optimizers=optimizers,
@@ -100,15 +69,31 @@ class TRPL(OnPolicy):
             batch_size=batch_size,
             n_epochs=n_epochs,
             gamma=gamma,
-            gae_lambda=gae_lambda,
             total_timesteps=total_timesteps,
             eval_interval=eval_interval,
             eval_deterministic=eval_deterministic,
             entropy_coef=entropy_coef,
             critic_coef=critic_coef,
             normalize_advantage=normalize_advantage,
-            clip_range=clip_range,
             device=device,
             env_spec_eval=env_spec_eval,
             eval_episodes=eval_episodes,
-        )
\ No newline at end of file
+        )
+
+        self.adv_module = GAE(
+            gamma=self.gamma,
+            lmbda=gae_lambda,
+            value_network=self.critic,
+            average_gae=False,
+        )
+
+        self.loss_module = TRPLLoss(
+            actor_network=self.actor,
+            critic_network=self.critic,
+            trust_region_layer=self.trust_region_layer,
+            loss_critic_type='l2',
+            entropy_coef=self.entropy_coef,
+            critic_coef=self.critic_coef,
+            trust_region_coef=self.trust_region_coef,
+            normalize_advantage=self.normalize_advantage,
+        )
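
Usage sketch (not part of the patch): a minimal way the TRPL class above might be constructed and trained after this change. It assumes TRPL is re-exported at package level, that env_spec accepts a Gymnasium environment id, that loggers takes a list of logger objects, and that train() is inherited from OnPolicy; none of these are confirmed by the diff itself, and the environment id is only a placeholder.

    # Minimal sketch under the assumptions stated above.
    from fancy_rl import TRPL  # assumed package-level export

    algo = TRPL(
        env_spec="Pendulum-v1",     # placeholder Gymnasium id (assumed input type)
        loggers=[],                 # assumed: list of logger objects; empty here
        trust_region_coef=10.0,     # weight of the trust-region term in TRPLLoss
        normalize_advantage=False,  # matches the new default introduced in this patch
    )
    algo.train()                    # train() assumed to be provided by OnPolicy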