diff --git a/projections_orig/__init__.py b/projections_orig/__init__.py
new file mode 100644
index 0000000..a578185
--- /dev/null
+++ b/projections_orig/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) 2021 Robert Bosch GmbH
+# Author: Fabian Otto
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
diff --git a/projections_orig/base_projection_layer.py b/projections_orig/base_projection_layer.py
new file mode 100644
index 0000000..3a881af
--- /dev/null
+++ b/projections_orig/base_projection_layer.py
@@ -0,0 +1,381 @@
+# Copyright (c) 2021 Robert Bosch GmbH
+# Author: Fabian Otto
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import copy
+import math
+import torch as ch
+from typing import Tuple, Union
+
+from trust_region_projections.models.policy.abstract_gaussian_policy import AbstractGaussianPolicy
+from trust_region_projections.utils.network_utils import get_optimizer
+from trust_region_projections.utils.projection_utils import gaussian_kl, get_entropy_schedule
+from trust_region_projections.utils.torch_utils import generate_minibatches, select_batch, tensorize
+
+
+def entropy_inequality_projection(policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ beta: Union[float, ch.Tensor]):
+ """
+ Projects std to satisfy an entropy INEQUALITY constraint.
+ Args:
+ policy: policy instance
+ p: current distribution
+ beta: target entropy for EACH std or general bound for all stds
+
+ Returns:
+ projected std that satisfies the entropy bound
+ """
+ mean, std = p
+ k = std.shape[-1]
+ batch_shape = std.shape[:-2]
+
+ ent = policy.entropy(p)
+ mask = ent < beta
+
+ # if nothing has to be projected skip computation
+ if (~mask).all():
+ return p
+
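+ # Scaling the std by a factor alpha shifts the Gaussian entropy by k * log(alpha), so
+ # alpha = exp((beta - ent) / k) lifts each violating entropy exactly to its bound beta.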
+ alpha = ch.ones(batch_shape, dtype=std.dtype, device=std.device)
+ alpha[mask] = ch.exp((beta[mask] - ent[mask]) / k)
+
+ proj_std = ch.einsum('ijk,i->ijk', std, alpha)
+ return mean, ch.where(mask[..., None, None], proj_std, std)
+
+
+def entropy_equality_projection(policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ beta: Union[float, ch.Tensor]):
+ """
+ Projects std to satisfy an entropy EQUALITY constraint.
+ Args:
+ policy: policy instance
+ p: current distribution
+ beta: target entropy for EACH std or general bound for all stds
+
+ Returns:
+ projected std that satisfies the entropy bound
+ """
+ mean, std = p
+ k = std.shape[-1]
+
+ ent = policy.entropy(p)
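+ # Same identity as above: multiplying the std by alpha changes the entropy by
+ # k * log(alpha), so this alpha sets the entropy of every std to beta exactly.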
+ alpha = ch.exp((beta - ent) / k)
+ proj_std = ch.einsum('ijk,i->ijk', std, alpha)
+ return mean, proj_std
+
+
+def mean_projection(mean: ch.Tensor, old_mean: ch.Tensor, maha: ch.Tensor, eps: ch.Tensor):
+ """
+ Projects the mean based on the Mahalanobis objective and trust region.
+ Args:
+ mean: current mean vectors
+ old_mean: old mean vectors
+ maha: Mahalanobis distance between the two mean vectors
+ eps: trust region bound
+
+ Returns:
+ projected mean that satisfies the trust region
+ """
+ batch_shape = mean.shape[:-1]
+ mask = maha > eps
+
+ ################################################################################################################
+ # mean projection maha
+
+ # if nothing has to be projected skip computation
+ if mask.any():
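+ # Convex interpolation towards the old mean: the maha distance is quadratic in the
+ # mean difference, so shrinking it by 1 / (1 + omega) with omega = sqrt(maha / eps) - 1
+ # places the projected mean exactly on the trust region boundary.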
+ omega = ch.ones(batch_shape, dtype=mean.dtype, device=mean.device)
+ omega[mask] = ch.sqrt(maha[mask] / eps) - 1.
+ omega = ch.max(-omega, omega)[..., None]
+
+ m = (mean + omega * old_mean) / (1 + omega + 1e-16)
+ proj_mean = ch.where(mask[..., None], m, mean)
+ else:
+ proj_mean = mean
+
+ return proj_mean
+
+
+class BaseProjectionLayer(object):
+
+ def __init__(self,
+ proj_type: str = "",
+ mean_bound: float = 0.03,
+ cov_bound: float = 1e-3,
+ trust_region_coeff: float = 0.0,
+ scale_prec: bool = True,
+
+ entropy_schedule: Union[None, str] = None,
+ action_dim: Union[None, int] = None,
+ total_train_steps: Union[None, int] = None,
+ target_entropy: float = 0.0,
+ temperature: float = 0.5,
+ entropy_eq: bool = False,
+ entropy_first: bool = False,
+
+ do_regression: bool = False,
+ regression_iters: int = 1000,
+ regression_lr: float = 3e-4,
+ optimizer_type_reg: str = "adam",
+
+ cpu: bool = True,
+ dtype: ch.dtype = ch.float32,
+ ):
+
+ """
+ Base projection layer, which can be used to compute metrics for non-projection approaches.
+ Args:
+ proj_type: Which type of projection to use. None specifies no projection and uses the TRPO objective.
+ mean_bound: projection bound for the step size w.r.t. mean
+ cov_bound: projection bound for the step size w.r.t. covariance matrix
+ trust_region_coeff: Coefficient for projection regularization loss term.
+ scale_prec: If True, use the Mahalanobis distance with Sigma_old^-1 for projections instead of the Euclidean distance.
+ entropy_schedule: Schedule type for entropy projection, one of 'linear', 'exp', None.
+ action_dim: number of action dimensions to scale exp decay correctly.
+ total_train_steps: total number of training steps to compute appropriate decay over time.
+ target_entropy: projection bound for the entropy of the covariance matrix
+ temperature: temperature decay for exponential entropy bound
+ entropy_eq: Use entropy equality constraints.
+ entropy_first: Project entropy before trust region.
+ do_regression: Conduct additional regression steps after the policy steps to match projection and policy.
+ regression_iters: Number of regression steps.
+ regression_lr: Regression learning rate.
+ optimizer_type_reg: Optimizer for regression.
+ cpu: Compute on CPU only.
+ dtype: Data type to use, either float32 or float64. The latter may be necessary for higher
+ dimensions in order to learn the full covariance.
+ """
+
+ # projection and bounds
+ self.proj_type = proj_type
+ self.mean_bound = tensorize(mean_bound, cpu=cpu, dtype=dtype)
+ self.cov_bound = tensorize(cov_bound, cpu=cpu, dtype=dtype)
+ self.trust_region_coeff = trust_region_coeff
+ self.scale_prec = scale_prec
+
+ # projection utils
+ assert not entropy_schedule or (action_dim and total_train_steps), "entropy_schedule requires action_dim and total_train_steps."
+ self.entropy_proj = entropy_equality_projection if entropy_eq else entropy_inequality_projection
+ self.entropy_schedule = get_entropy_schedule(entropy_schedule, total_train_steps, dim=action_dim)
+ self.target_entropy = tensorize(target_entropy, cpu=cpu, dtype=dtype)
+ self.entropy_first = entropy_first
+ self.entropy_eq = entropy_eq
+ self.temperature = temperature
+ self._initial_entropy = None
+
+ # regression
+ self.do_regression = do_regression
+ self.regression_iters = regression_iters
+ self.lr_reg = regression_lr
+ self.optimizer_type_reg = optimizer_type_reg
+
+ def __call__(self, policy, p: Tuple[ch.Tensor, ch.Tensor], q, step, *args, **kwargs):
+ # entropy_bound = self.policy.entropy(q) - self.target_entropy
+ entropy_bound = self.entropy_schedule(self.initial_entropy, self.target_entropy, self.temperature,
+ step) * p[0].new_ones(p[0].shape[0])
+ return self._projection(policy, p, q, self.mean_bound, self.cov_bound, entropy_bound, **kwargs)
+
+ def _trust_region_projection(self, policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ q: Tuple[ch.Tensor, ch.Tensor], eps: ch.Tensor, eps_cov: ch.Tensor, **kwargs):
+ """
+ Hook for implementing the specific trust region projection
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ eps: mean trust region bound
+ eps_cov: covariance trust region bound
+ **kwargs:
+
+ Returns:
+ projected
+ """
+ return p
+
+ # @final
+ def _projection(self, policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ q: Tuple[ch.Tensor, ch.Tensor], eps: ch.Tensor, eps_cov: ch.Tensor, beta: ch.Tensor, **kwargs):
+ """
+ Template method with hook _trust_region_projection() to encode specific functionality.
+ (Optional) entropy projection is executed before or after as specified by entropy_first.
+ Do not override this method. For Python >= 3.8, the @final decorator can be used to enforce this.
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ eps: mean trust region bound
+ eps_cov: covariance trust region bound
+ beta: entropy bound
+ **kwargs:
+
+ Returns:
+ projected mean, projected std
+ """
+
+ ####################################################################################################################
+ # entropy projection in the beginning
+ if self.entropy_first:
+ p = self.entropy_proj(policy, p, beta)
+
+ ####################################################################################################################
+ # trust region projection for mean and cov bounds
+ proj_mean, proj_std = self._trust_region_projection(policy, p, q, eps, eps_cov, **kwargs)
+
+ ####################################################################################################################
+ # entropy projection in the end
+ if self.entropy_first:
+ return proj_mean, proj_std
+
+ return self.entropy_proj(policy, (proj_mean, proj_std), beta)
+
+ @property
+ def initial_entropy(self):
+ return self._initial_entropy
+
+ @initial_entropy.setter
+ def initial_entropy(self, entropy):
+ if self.initial_entropy is None:
+ self._initial_entropy = entropy
+
+ def trust_region_value(self, policy, p, q):
+ """
+ Computes the KL divergence between two Gaussian distributions p and q.
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ Returns:
+ Mean and covariance part of the trust region metric.
+ """
+ return gaussian_kl(policy, p, q)
+
+ def get_trust_region_loss(self, policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ proj_p: Tuple[ch.Tensor, ch.Tensor]):
+ """
+ Compute the trust region loss to ensure policy output and projection stay close.
+ Args:
+ policy: policy instance
+ proj_p: projected distribution
+ p: predicted distribution from network output
+
+ Returns:
+ trust region loss
+ """
+ p_target = (proj_p[0].detach(), proj_p[1].detach())
+ mean_diff, cov_diff = self.trust_region_value(policy, p, p_target)
+
+ delta_loss = (mean_diff + cov_diff if policy.contextual_std else mean_diff).mean()
+
+ return delta_loss * self.trust_region_coeff
+
+ def compute_metrics(self, policy, p, q) -> dict:
+ """
+ Returns dict with constraint metrics.
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+
+ Returns:
+ dict with constraint metrics
+ """
+ with ch.no_grad():
+ entropy_old = policy.entropy(q)
+ entropy = policy.entropy(p)
+ mean_kl, cov_kl = gaussian_kl(policy, p, q)
+ kl = mean_kl + cov_kl
+
+ mean_diff, cov_diff = self.trust_region_value(policy, p, q)
+
+ combined_constraint = mean_diff + cov_diff
+ entropy_diff = entropy_old - entropy
+
+ return {'kl': kl.detach().mean(),
+ 'constraint': combined_constraint.mean(),
+ 'mean_constraint': mean_diff.mean(),
+ 'cov_constraint': cov_diff.mean(),
+ 'entropy': entropy.mean(),
+ 'entropy_diff': entropy_diff.mean(),
+ 'kl_max': kl.max(),
+ 'constraint_max': combined_constraint.max(),
+ 'mean_constraint_max': mean_diff.max(),
+ 'cov_constraint_max': cov_diff.max(),
+ 'entropy_max': entropy.max(),
+ 'entropy_diff_max': entropy_diff.max()
+ }
+
+ def trust_region_regression(self, policy: AbstractGaussianPolicy, obs: ch.Tensor, q: Tuple[ch.Tensor, ch.Tensor],
+ n_minibatches: int, global_steps: int):
+ """
+ Take additional regression steps to match projection output and policy output.
+ The policy parameters are updated in-place.
+ Args:
+ policy: policy instance
+ obs: collected observations from trajectories
+ q: old distributions
+ n_minibatches: split the rollouts into n_minibatches.
+ global_steps: current number of steps, required for projection
+ Returns:
+ dict with mean of regression loss
+ """
+
+ if not self.do_regression:
+ return {}
+
+ policy_unprojected = copy.deepcopy(policy)
+ optim_reg = get_optimizer(self.optimizer_type_reg, policy_unprojected.parameters(), learning_rate=self.lr_reg)
+ optim_reg.reset()
+
+ reg_losses = obs.new_tensor(0.)
+
+ # get current projected values --> targets for regression
+ p_flat = policy(obs)
+ p_target = self(policy, p_flat, q, global_steps)
+
+ for _ in range(self.regression_iters):
+ batch_indices = generate_minibatches(obs.shape[0], n_minibatches)
+
+ # Minibatches SGD
+ for indices in batch_indices:
+ batch = select_batch(indices, obs, p_target[0], p_target[1])
+ b_obs, b_target_mean, b_target_std = batch
+ proj_p = (b_target_mean.detach(), b_target_std.detach())
+
+ p = policy_unprojected(b_obs)
+
+ # invert scaling with coeff here as we do not have to balance with other losses
+ loss = self.get_trust_region_loss(policy, p, proj_p) / self.trust_region_coeff
+
+ optim_reg.zero_grad()
+ loss.backward()
+ optim_reg.step()
+ reg_losses += loss.detach()
+
+ policy.load_state_dict(policy_unprojected.state_dict())
+
+ if not policy.contextual_std:
+ # set policy with projection value.
+ # In non-contextual cases we have only one cov, so the projection is the same.
+ policy.set_std(p_target[1][0])
+
+ steps = self.regression_iters * (math.ceil(obs.shape[0] / n_minibatches))
+ return {"regression_loss": (reg_losses / steps).detach()}
diff --git a/projections_orig/frob_projection_layer.py b/projections_orig/frob_projection_layer.py
new file mode 100644
index 0000000..8d338ce
--- /dev/null
+++ b/projections_orig/frob_projection_layer.py
@@ -0,0 +1,98 @@
+# Copyright (c) 2021 Robert Bosch GmbH
+# Author: Fabian Otto
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import torch as ch
+from typing import Tuple
+
+from trust_region_projections.models.policy.abstract_gaussian_policy import AbstractGaussianPolicy
+from trust_region_projections.projections.base_projection_layer import BaseProjectionLayer, mean_projection
+from trust_region_projections.utils.projection_utils import gaussian_frobenius
+
+
+class FrobeniusProjectionLayer(BaseProjectionLayer):
+
+ def _trust_region_projection(self, policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ q: Tuple[ch.Tensor, ch.Tensor], eps: ch.Tensor, eps_cov: ch.Tensor, **kwargs):
+ """
+ Runs the Frobenius projection layer and constructs the Cholesky factor of the covariance
+
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ eps: (modified) KL bound / KL bound for mean part
+ eps_cov: (modified) KL bound for cov part
+ **kwargs:
+ Returns: mean, cov cholesky
+ """
+
+ mean, chol = p
+ old_mean, old_chol = q
+ batch_shape = mean.shape[:-1]
+
+ ####################################################################################################################
+ # precompute mean and cov part of frob projection, which are used for the projection.
+ mean_part, cov_part, cov, cov_old = gaussian_frobenius(policy, p, q, self.scale_prec, True)
+
+ ################################################################################################################
+ # mean projection maha/euclidean
+
+ proj_mean = mean_projection(mean, old_mean, mean_part, eps)
+
+ ################################################################################################################
+ # cov projection frobenius
+
+ cov_mask = cov_part > eps_cov
+
+ if cov_mask.any():
+ # alpha = ch.where(fro_norm_sq > eps_cov, ch.sqrt(fro_norm_sq / eps_cov) - 1., ch.tensor(1.))
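+ # eta = sqrt(cov_part / eps_cov) - 1 interpolates between new and old covariance; the
+ # squared Frobenius distance is quadratic in cov, so this lands exactly on the bound.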
+ eta = ch.ones(batch_shape, dtype=chol.dtype, device=chol.device)
+ eta[cov_mask] = ch.sqrt(cov_part[cov_mask] / eps_cov) - 1.
+ eta = ch.max(-eta, eta)
+
+ new_cov = (cov + ch.einsum('i,ijk->ijk', eta, cov_old)) / (1. + eta + 1e-16)[..., None, None]
+ proj_chol = ch.where(cov_mask[..., None, None], ch.cholesky(new_cov), chol)
+ else:
+ proj_chol = chol
+
+ return proj_mean, proj_chol
+
+ def trust_region_value(self, policy, p, q):
+ """
+ Computes the Frobenius metric between two Gaussian distributions p and q.
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ Returns:
+ mean and covariance part of Frobenius metric
+ """
+ return gaussian_frobenius(policy, p, q, self.scale_prec)
+
+ def get_trust_region_loss(self, policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ proj_p: Tuple[ch.Tensor, ch.Tensor]):
+
+ mean_diff, _ = self.trust_region_value(policy, p, proj_p)
+ if policy.contextual_std:
+ # Compute MSE here, because we found the Frobenius norm tends to generate values that explode for the cov
+ cov_diff = (p[1] - proj_p[1]).pow(2).sum([-1, -2])
+ delta_loss = (mean_diff + cov_diff).mean()
+ else:
+ delta_loss = mean_diff.mean()
+
+ return delta_loss * self.trust_region_coeff
diff --git a/projections_orig/kl_projection_layer.py b/projections_orig/kl_projection_layer.py
new file mode 100644
index 0000000..ca5acd5
--- /dev/null
+++ b/projections_orig/kl_projection_layer.py
@@ -0,0 +1,105 @@
+import cpp_projection
+import numpy as np
+import torch as ch
+from typing import Any, Tuple
+
+from trust_region_projections.models.policy.abstract_gaussian_policy import AbstractGaussianPolicy
+from trust_region_projections.projections.base_projection_layer import BaseProjectionLayer, mean_projection
+from trust_region_projections.utils.projection_utils import gaussian_kl
+from trust_region_projections.utils.torch_utils import get_numpy
+
+
+class KLProjectionLayer(BaseProjectionLayer):
+
+ def _trust_region_projection(self, policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ q: Tuple[ch.Tensor, ch.Tensor], eps: ch.Tensor, eps_cov: ch.Tensor, **kwargs):
+ """
+ Runs the KL projection layer and constructs the Cholesky factor of the covariance
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ eps: (modified) KL bound / KL bound for mean part
+ eps_cov: (modified) KL bound for cov part
+ **kwargs:
+
+ Returns:
+ projected mean, projected cov cholesky
+ """
+ mean, std = p
+ old_mean, old_std = q
+
+ if not policy.contextual_std:
+ # only project first one to reduce number of numerical optimizations
+ std = std[:1]
+ old_std = old_std[:1]
+
+ ################################################################################################################
+ # project mean with closed form
+ mean_part, _ = gaussian_kl(policy, p, q)
+ proj_mean = mean_projection(mean, old_mean, mean_part, eps)
+
+ cov = policy.covariance(std)
+ old_cov = policy.covariance(old_std)
+
+ if policy.is_diag:
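+ # The diagonal-covariance KL projection has no closed form; it is solved numerically
+ # per batch element by the batched C++ op wrapped below.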
+ proj_cov = KLProjectionGradFunctionDiagCovOnly.apply(cov.diagonal(dim1=-2, dim2=-1),
+ old_cov.diagonal(dim1=-2, dim2=-1),
+ eps_cov)
+ proj_std = proj_cov.sqrt().diag_embed()
+ else:
+ raise NotImplementedError("The KL projection currently does not support full covariance matrices.")
+
+ if not policy.contextual_std:
+ # expand first std back to the batch size
+ proj_std = proj_std.expand(mean.shape[0], -1, -1)
+
+ return proj_mean, proj_std
+
+
+class KLProjectionGradFunctionDiagCovOnly(ch.autograd.Function):
+ projection_op = None
+
+ @staticmethod
+ def get_projection_op(batch_shape, dim, max_eval=100):
+ if not KLProjectionGradFunctionDiagCovOnly.projection_op:
+ KLProjectionGradFunctionDiagCovOnly.projection_op = \
+ cpp_projection.BatchedDiagCovOnlyProjection(batch_shape, dim, max_eval=max_eval)
+ return KLProjectionGradFunctionDiagCovOnly.projection_op
+
+ @staticmethod
+ def forward(ctx: Any, *args: Any, **kwargs: Any) -> Any:
+ std, old_std, eps_cov = args
+
+ batch_shape = std.shape[0]
+ dim = std.shape[-1]
+
+ cov_np = get_numpy(std)
+ old_std = get_numpy(old_std)
+ eps = get_numpy(eps_cov) * np.ones(batch_shape)
+
+ # p_op = cpp_projection.BatchedDiagCovOnlyProjection(batch_shape, dim)
+ # ctx.proj = projection_op
+
+ p_op = KLProjectionGradFunctionDiagCovOnly.get_projection_op(batch_shape, dim)
+ ctx.proj = p_op
+
+ proj_std = p_op.forward(eps, old_std, cov_np)
+
+ return std.new(proj_std)
+
+ @staticmethod
+ def backward(ctx: Any, *grad_outputs: Any) -> Any:
+ projection_op = ctx.proj
+ d_std, = grad_outputs
+
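+ # The C++ op returns the gradient w.r.t. the input variances by differentiating through
+ # the numerical projection; old_std and eps_cov receive no gradient (hence None, None).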
+ d_std_np = get_numpy(d_std)
+ d_std_np = np.atleast_2d(d_std_np)
+ df_stds = projection_op.backward(d_std_np)
+ df_stds = np.atleast_2d(df_stds)
+
+ return d_std.new(df_stds), None, None
diff --git a/projections_orig/papi_projection.py b/projections_orig/papi_projection.py
new file mode 100644
index 0000000..b52db75
--- /dev/null
+++ b/projections_orig/papi_projection.py
@@ -0,0 +1,237 @@
+# Copyright (c) 2021 Robert Bosch GmbH
+# Author: Fabian Otto
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import logging
+
+import copy
+import numpy as np
+import torch as ch
+from typing import Tuple, Union
+
+from trust_region_projections.utils.projection_utils import gaussian_kl
+from trust_region_projections.models.policy.abstract_gaussian_policy import AbstractGaussianPolicy
+from trust_region_projections.projections.base_projection_layer import BaseProjectionLayer
+from trust_region_projections.utils.torch_utils import torch_batched_trace
+
+logger = logging.getLogger("papi_projection")
+
+
+class PAPIProjection(BaseProjectionLayer):
+
+ def __init__(self,
+ proj_type: str = "",
+ mean_bound: float = 0.015,
+ cov_bound: float = 0.0,
+
+ entropy_eq: bool = False,
+ entropy_first: bool = True,
+
+ cpu: bool = True,
+ dtype: ch.dtype = ch.float32,
+ **kwargs
+ ):
+
+ """
+ PAPI projection, which can be used after each training epoch to satisfy the trust regions.
+ Args:
+ proj_type: Which type of projection to use. None specifies no projection and uses the TRPO objective.
+ mean_bound: projection bound for the step size,
+ PAPI only has a joint KL constraint, mean and cov bound are summed for this bound.
+ cov_bound: projection bound for the step size,
+ PAPI only has a joint KL constraint, mean and cov bound are summed for this bound.
+ entropy_eq: Use entropy equality constraints.
+ entropy_first: Project entropy before trust region.
+ cpu: Compute on CPU only.
+ dtype: Data type to use, either float32 or float64. The latter may be necessary for higher
+ dimensions in order to learn the full covariance.
+ """
+
+ assert entropy_first
+ super().__init__(proj_type, mean_bound, cov_bound, 0.0, False, None, None, None, 0.0, 0.0, entropy_eq,
+ entropy_first, cpu=cpu, dtype=dtype)
+
+ self.last_policies = []
+
+ def __call__(self, policy, p, q, step=0, *args, **kwargs):
+ if kwargs.get("obs") is not None:
+ self._papi_steps(policy, q, **kwargs)
+ else:
+ return p
+
+ def _trust_region_projection(self, policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ q: Tuple[ch.Tensor, ch.Tensor], eps: Union[ch.Tensor, float],
+ eps_cov: Union[ch.Tensor, float], **kwargs):
+ """
+ Runs the PAPI projection layer and constructs the sqrt of the covariance
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ eps: (modified) KL bound / KL bound for mean part
+ eps_cov: (modified) KL bound for cov part
+ **kwargs:
+
+ Returns:
+ mean, cov sqrt
+ """
+
+ mean, chol = p
+ old_mean, old_chol = q
+ intermed_mean = kwargs.get('intermed_mean')
+
+ dtype = mean.dtype
+ device = mean.device
+
+ dim = mean.shape[-1]
+
+ ################################################################################################################
+ # Precompute basic matrices
+
+ # Joint bound
+ eps += eps_cov
+
+ I = ch.eye(dim, dtype=dtype, device=device)
+ old_precision = ch.cholesky_solve(I, old_chol)[0]
+ logdet_old = policy.log_determinant(old_chol)
+ cov = policy.covariance(chol)
+
+ ################################################################################################################
+ # compute expected KL
+ maha_part, cov_part = gaussian_kl(policy, p, q)
+ maha_part = maha_part.mean()
+ cov_part = cov_part.mean()
+
+ if intermed_mean is not None:
+ maha_intermediate = 0.5 * policy.maha(intermed_mean, old_mean, old_chol).mean()
+ mm = ch.min(maha_part, maha_intermediate)
+
+ ################################################################################################################
+ # matrix rotation/rescaling projection
+ if maha_part + cov_part > eps + 1e-6:
+ old_cov = policy.covariance(old_chol)
+
+ maha_delta = eps if intermed_mean is None else (eps - mm)
+ eta_rot = maha_delta / ch.max(maha_part + cov_part, ch.tensor(1e-16, dtype=dtype, device=device))
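+ # Convex combination of old and new covariance; eta_rot rescales the update so the
+ # (approximate) joint KL fits within the remaining budget maha_delta.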
+ new_cov = (1 - eta_rot) * old_cov + eta_rot * cov
+ proj_chol = ch.cholesky(new_cov)
+
+ # recompute covariance part of KL for new chol
+ trace_term = 0.5 * (torch_batched_trace(old_precision @ new_cov) - dim).mean() # rotation difference
+ entropy_diff = 0.5 * (logdet_old - policy.log_determinant(proj_chol)).mean()
+
+ cov_part = trace_term + entropy_diff
+
+ else:
+ proj_chol = chol
+
+ ################################################################################################################
+ # mean interpolation projection
+ if maha_part + cov_part > eps + 1e-6:
+
+ if intermed_mean is not None:
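+ # Largest step size eta along (mean - intermed_mean) keeping the maha part within
+ # budget: root of the quadratic a * eta^2 + 2 * b * eta + c = 0 (standard formula).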
+ a = 0.5 * policy.maha(mean, intermed_mean, old_chol).mean()
+ b = 0.5 * ((mean - intermed_mean) @ old_precision @ (intermed_mean - old_mean).T).mean()
+ c = maha_intermediate - ch.max(eps - cov_part, ch.tensor(0., dtype=dtype, device=device))
+ eta_mean = (-b + ch.sqrt(ch.max(b * b - a * c, ch.tensor(1e-16, dtype=dtype, device=device)))) / \
+ ch.max(a, ch.tensor(1e-16, dtype=dtype, device=device))
+ else:
+ eta_mean = ch.sqrt(
+ ch.max(eps - cov_part, ch.tensor(1e-16, dtype=dtype, device=device)) /
+ ch.max(maha_part, ch.tensor(1e-16, dtype=dtype, device=device)))
+ else:
+ eta_mean = ch.tensor(1., dtype=dtype, device=device)
+
+ return eta_mean, proj_chol
+
+ def _papi_steps(self, policy: AbstractGaussianPolicy, q: Tuple[ch.Tensor, ch.Tensor], obs: ch.Tensor, lr_schedule,
+ lr_schedule_vf=None):
+ """
+ Take PAPI steps after PPO finished its steps. Policy parameters are updated in-place.
+ Args:
+ policy: policy instance
+ q: old distribution
+ obs: collected observations from trajectories
+ lr_schedule: lr schedule for policy
+ lr_schedule_vf: lr schedule for vf
+
+ Returns:
+
+ """
+ assert not policy.contextual_std
+
+ # save latest policy in history
+ self.last_policies.append(copy.deepcopy(policy))
+
+ ################################################################################################################
+ # policy backtracking: out of last n policies and current one find one that satisfies the kl constraint
+
+ intermed_policy = None
+ n_backtracks = 0
+
+ for i, pi in enumerate(reversed(self.last_policies)):
+ p_prime = pi(obs)
+ mean_part, cov_part = pi.kl_divergence(p_prime, q)
+ if (mean_part + cov_part).mean() <= self.mean_bound + self.cov_bound:
+ intermed_policy = pi
+ n_backtracks = i
+ break
+
+ ################################################################################################################
+ # LR update
+
+ # reduce learning rate when no appropriate policy was found within the last 4 epochs
+ if n_backtracks >= 4 or intermed_policy is None:
+ # Linear learning rate annealing
+ lr_schedule.step()
+ if lr_schedule_vf:
+ lr_schedule_vf.step()
+
+ if intermed_policy is None:
+ # pop last policy and make it current one, as the updated one was poor
+ # do not keep last policy in history, otherwise we could stack the same policy multiple times.
+ if len(self.last_policies) >= 1:
+ policy.load_state_dict(self.last_policies.pop().state_dict())
+ logger.warning(f"No suitable policy found in backtracking of {len(self.last_policies)} policies.")
+ return
+
+ ################################################################################################################
+ # PAPI iterations
+
+ # We assume non-contextual covariances here; therefore, we only need to project one.
+ q = (q[0], q[1][:1]) # (means, covs[:1])
+
+ # This is A from Alg. 2 [Akrour et al., 2019]
+ intermed_weight = intermed_policy.get_last_layer().detach().clone()
+ # This is A @ phi(s)
+ intermed_mean = p_prime[0].detach().clone()
+
+ entropy = policy.entropy(q)
+ entropy_bound = obs.new_tensor([-np.inf]) if entropy / self.initial_entropy > 0.5 \
+ else entropy - (self.mean_bound + self.cov_bound)
+
+ for _ in range(20):
+ eta, proj_chol = self._projection(intermed_policy, (p_prime[0], p_prime[1][:1]), q,
+ self.mean_bound, self.cov_bound, entropy_bound,
+ intermed_mean=intermed_mean)
+ intermed_policy.papi_weight_update(eta, intermed_weight)
+ intermed_policy.set_std(proj_chol[0])
+ p_prime = intermed_policy(obs)
+
+ policy.load_state_dict(intermed_policy.state_dict())
diff --git a/projections_orig/projection_factory.py b/projections_orig/projection_factory.py
new file mode 100644
index 0000000..9c38275
--- /dev/null
+++ b/projections_orig/projection_factory.py
@@ -0,0 +1,58 @@
+# Copyright (c) 2021 Robert Bosch GmbH
+# Author: Fabian Otto
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+from trust_region_projections.projections.base_projection_layer import BaseProjectionLayer
+from trust_region_projections.projections.frob_projection_layer import FrobeniusProjectionLayer
+from trust_region_projections.projections.kl_projection_layer import KLProjectionLayer
+from trust_region_projections.projections.papi_projection import PAPIProjection
+from trust_region_projections.projections.w2_projection_layer import WassersteinProjectionLayer
+
+
+def get_projection_layer(proj_type: str = "", **kwargs) -> BaseProjectionLayer:
+ """
+ Factory to generate the projection layers for all projections.
+ Args:
+ proj_type: One of None/' ', 'ppo', 'papi', 'w2', 'w2_non_com', 'frob', 'kl', or 'entropy'
+ **kwargs: arguments for projection layer
+
+ Returns:
+ Projection layer instance matching proj_type.
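+
+ Example (bound values are illustrative only):
+ >>> proj = get_projection_layer("frob", mean_bound=0.05, cov_bound=5e-4,
+ ... trust_region_coeff=8.0)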
+ """
+ if not proj_type or proj_type.isspace() or proj_type.lower() in ["ppo", "sac", "td3", "mpo", "entropy"]:
+ return BaseProjectionLayer(proj_type, **kwargs)
+
+ elif proj_type.lower() == "w2":
+ return WassersteinProjectionLayer(proj_type, **kwargs)
+
+ elif proj_type.lower() == "frob":
+ return FrobeniusProjectionLayer(proj_type, **kwargs)
+
+ elif proj_type.lower() == "kl":
+ return KLProjectionLayer(proj_type, **kwargs)
+
+ elif proj_type.lower() == "papi":
+ # papi has a different approach compared to our projections.
+ # It has to be applied after the training with PPO.
+ return PAPIProjection(proj_type, **kwargs)
+
+ else:
+ raise ValueError(
+ f"Invalid projection type {proj_type}."
+ f" Choose one of None/' ', 'ppo', 'papi', 'w2', 'w2_non_com', 'frob', 'kl', or 'entropy'.")
diff --git a/projections_orig/w2_projection_layer.py b/projections_orig/w2_projection_layer.py
new file mode 100644
index 0000000..bce87a3
--- /dev/null
+++ b/projections_orig/w2_projection_layer.py
@@ -0,0 +1,84 @@
+# Copyright (c) 2021 Robert Bosch GmbH
+# Author: Fabian Otto
+#
+# This program is free software: you can redistribute it and/or modify
+# it under the terms of the GNU Affero General Public License as published
+# by the Free Software Foundation, either version 3 of the License, or
+# (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU Affero General Public License for more details.
+#
+# You should have received a copy of the GNU Affero General Public License
+# along with this program. If not, see <https://www.gnu.org/licenses/>.
+
+import torch as ch
+from typing import Tuple
+
+from trust_region_projections.models.policy.abstract_gaussian_policy import AbstractGaussianPolicy
+from trust_region_projections.projections.base_projection_layer import BaseProjectionLayer, mean_projection
+from trust_region_projections.utils.projection_utils import gaussian_wasserstein_commutative
+
+
+class WassersteinProjectionLayer(BaseProjectionLayer):
+
+ def _trust_region_projection(self, policy: AbstractGaussianPolicy, p: Tuple[ch.Tensor, ch.Tensor],
+ q: Tuple[ch.Tensor, ch.Tensor], eps: ch.Tensor, eps_cov: ch.Tensor, **kwargs):
+ """
+ Runs the commutative Wasserstein projection layer and constructs the sqrt of the covariance
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ eps: (modified) KL bound / KL bound for mean part
+ eps_cov: (modified) KL bound for cov part
+ **kwargs:
+
+ Returns:
+ mean, cov sqrt
+ """
+ mean, sqrt = p
+ old_mean, old_sqrt = q
+ batch_shape = mean.shape[:-1]
+
+ ####################################################################################################################
+ # precompute mean and cov part of W2, which are used for the projection.
+ # Both parts differ based on precision scaling.
+ # If activated, the mean part is the maha distance and the cov has a more complex term in the inner parenthesis.
+ mean_part, cov_part = gaussian_wasserstein_commutative(policy, p, q, self.scale_prec)
+
+ ####################################################################################################################
+ # project mean (w/ or w/o precision scaling)
+ proj_mean = mean_projection(mean, old_mean, mean_part, eps)
+
+ ####################################################################################################################
+ # project covariance (w/ or w/o precision scaling)
+
+ cov_mask = cov_part > eps_cov
+
+ if cov_mask.any():
+ # gradient issue with ch.where, it executes both paths and gives NaN gradient.
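+ # As in the Frobenius case, eta = sqrt(cov_part / eps_cov) - 1 interpolates the sqrt
+ # towards old_sqrt so the commutative W2 cov part lands exactly on the bound.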
+ eta = ch.ones(batch_shape, dtype=sqrt.dtype, device=sqrt.device)
+ eta[cov_mask] = ch.sqrt(cov_part[cov_mask] / eps_cov) - 1.
+ eta = ch.max(-eta, eta)
+
+ new_sqrt = (sqrt + ch.einsum('i,ijk->ijk', eta, old_sqrt)) / (1. + eta + 1e-16)[..., None, None]
+ proj_sqrt = ch.where(cov_mask[..., None, None], new_sqrt, sqrt)
+ else:
+ proj_sqrt = sqrt
+
+ return proj_mean, proj_sqrt
+
+ def trust_region_value(self, policy, p, q):
+ """
+ Computes the Wasserstein distance between two Gaussian distributions p and q.
+ Args:
+ policy: policy instance
+ p: current distribution
+ q: old distribution
+ Returns:
+ mean and covariance part of Wasserstein distance
+ """
+ return gaussian_wasserstein_commutative(policy, p, q, scale_prec=self.scale_prec)
\ No newline at end of file
diff --git a/trl_pg/trl_pg.py b/trl_pg/trl_pg.py
index d544091..09c9c58 100644
--- a/trl_pg/trl_pg.py
+++ b/trl_pg/trl_pg.py
@@ -95,8 +95,7 @@ class TRL_PG(OnPolicyAlgorithm):
device: Union[th.device, str] = "auto",
# Different from PPO:
- importance_ratio_clip: Union[float, None] = 0.2
- #TODO: projection: BaseProjectionLayer = None,
+ projection: BaseProjectionLayer = None,
_init_setup_model: bool = True,
):
@@ -161,7 +160,7 @@ class TRL_PG(OnPolicyAlgorithm):
self.target_kl = target_kl
# Different from PPO:
- self.importance_ratio_clip = importance_ratio_clip or 0.0
+ self.projection = projection
if _init_setup_model:
self._setup_model()
@@ -191,7 +190,9 @@ class TRL_PG(OnPolicyAlgorithm):
if self.clip_range_vf is not None:
clip_range_vf = self.clip_range_vf(self._current_progress_remaining)
+ surrogate_losses = []
entropy_losses = []
+ trust_region_losses = []
pg_losses, value_losses = [], []
clip_fractions = []
@@ -221,10 +222,13 @@ class TRL_PG(OnPolicyAlgorithm):
# ratio between old and new policy, should be one at the first iteration
ratio = th.exp(log_prob - rollout_data.old_log_prob)
+ # Difference from PPO: We renamed 'policy_loss' to 'surrogate_loss'
# clipped surrogate loss
- policy_loss_1 = advantages * ratio
- policy_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
- policy_loss = -th.min(policy_loss_1, policy_loss_2).mean()
+ surrogate_loss_1 = advantages * ratio
+ surrogate_loss_2 = advantages * th.clamp(ratio, 1 - clip_range, 1 + clip_range)
+ surrogate_loss = -th.min(surrogate_loss_1, surrogate_loss_2).mean()
+
+ surrogate_losses.append(surrogate_loss.item())
# Logging
- pg_losses.append(policy_loss.item())
+ pg_losses.append(surrogate_loss.item())
@@ -253,7 +257,14 @@ class TRL_PG(OnPolicyAlgorithm):
entropy_losses.append(entropy_loss.item())
- loss = policy_loss + self.ent_coef * entropy_loss + self.vf_coef * value_loss
+ # Difference from PPO: added trust_region_loss; policy_loss includes entropy_loss + trust_region_loss
+ trust_region_loss = self.projection.get_trust_region_loss()  # TODO: pass policy, p, proj_p
+
+ trust_region_losses.append(trust_region_loss.item())
+
+ policy_loss = surrogate_loss + self.ent_coef * entropy_loss + trust_region_loss
+
+ loss = policy_loss + self.vf_coef * value_loss
# Calculate approximate form of reverse KL Divergence for early stopping
# see issue #417: https://github.com/DLR-RM/stable-baselines3/issues/417
@@ -284,7 +295,9 @@ class TRL_PG(OnPolicyAlgorithm):
explained_var = explained_variance(self.rollout_buffer.values.flatten(), self.rollout_buffer.returns.flatten())
# Logs
+ self.logger.record("train/surrogate_loss", np.mean(surrogate_losses))
self.logger.record("train/entropy_loss", np.mean(entropy_losses))
+ self.logger.record("train/trust_region_loss", np.mean(trust_region_losses))
self.logger.record("train/policy_gradient_loss", np.mean(pg_losses))
self.logger.record("train/value_loss", np.mean(value_losses))
self.logger.record("train/approx_kl", np.mean(approx_kl_divs))