Fixed bug for logging std-estimates when using batched data

Dominik Moritz Roth 2022-07-16 15:18:24 +02:00
parent 4a24381f46
commit d2d84d3287


@@ -360,13 +360,18 @@ class PPO(GaussianRolloutCollectorAuxclass, OnPolicyAlgorithm):
         if hasattr(self.policy, "log_std"):
             self.logger.record(
                 "train/std", th.exp(self.policy.log_std).mean().item())
-        if hasattr(self.policy, "chol"):
+        elif hasattr(self.policy, "chol"):
             if len(self.policy.chol.shape) == 1:
                 self.logger.record(
                     "train/std", th.mean(self.policy.chol).mean().item())
             else:
+                if len(self.policy.chol.shape) == 2:
+                    chol = self.policy.chol
+                else:
+                    # TODO: Maybe use a broader sample?
+                    chol = self.policy.chol[0]
                 self.logger.record(
-                    "train/std", th.mean(th.sqrt(th.diagonal(self.policy.chol.T @ self.policy.chol, dim1=-2, dim2=-1))).mean().item())
+                    "train/std", th.mean(th.sqrt(th.diagonal(chol.T @ chol, dim1=-2, dim2=-1))).mean().item())
         self.logger.record("train/n_updates",
                            self._n_updates, exclude="tensorboard")
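For reference, a minimal standalone sketch of the std-logging logic after this fix, using a hypothetical helper std_from_chol that is not part of the commit. It mirrors the branches in the diff: a 1-D chol is treated as a vector of stds, a 2-D chol as a single Cholesky-style factor, and a batched 3-D chol (the case this commit fixes) falls back to its first element.

import torch as th

def std_from_chol(chol: th.Tensor) -> float:
    # 1-D: chol already holds per-dimension stds
    if chol.dim() == 1:
        return chol.mean().item()
    # 3-D (batched factors): fall back to the first sample,
    # mirroring the TODO in the commit
    if chol.dim() > 2:
        chol = chol[0]
    # Same estimate as in the diff: sqrt of the diagonal of chol.T @ chol
    diag = th.diagonal(chol.T @ chol, dim1=-2, dim2=-1)
    return th.sqrt(diag).mean().item()

# Example: a batch of 8 diagonal factors with std 0.5 in each dimension
batched = th.stack([0.5 * th.eye(3) for _ in range(8)])
print(std_from_chol(batched))  # 0.5

Before the fix, passing such a batched (batch, dim, dim) factor into the logging expression would fail or produce a misleading mean, since chol.T @ chol assumes a single 2-D factor.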