overriding collect_rollouts
parent 1a49a412c0 · commit 866f863d70
@@ -10,6 +10,10 @@ from stable_baselines3.common.on_policy_algorithm import OnPolicyAlgorithm
 from stable_baselines3.common.policies import ActorCriticCnnPolicy, ActorCriticPolicy, BasePolicy, MultiInputActorCriticPolicy
 from stable_baselines3.common.type_aliases import GymEnv, MaybeCallback, Schedule
 from stable_baselines3.common.utils import explained_variance, get_schedule_fn
+from stable_baselines3.common.vec_env import VecEnv
+from stable_baselines3.common.buffers import RolloutBuffer
+from stable_baselines3.common.callbacks import BaseCallback
+from stable_baselines3.common.utils import obs_as_tensor
 
 
 class TRL_PG(OnPolicyAlgorithm):
@@ -379,3 +383,102 @@ class TRL_PG(OnPolicyAlgorithm):
             eval_log_path=eval_log_path,
             reset_num_timesteps=reset_num_timesteps,
         )
+
+    # This is new compared to PPO.
+    # TRL requires us to also save the original mean and std in our rollouts.
+    def collect_rollouts(
+        self,
+        env: VecEnv,
+        callback: BaseCallback,
+        rollout_buffer: RolloutBuffer,
+        n_rollout_steps: int,
+    ) -> bool:
+        """
+        Collect experiences using the current policy and fill a ``RolloutBuffer``.
+
+        The term rollout here refers to the model-free notion and should not
+        be used with the concept of rollout used in model-based RL or planning.
+
+        :param env: The training environment
+        :param callback: Callback that will be called at each step
+            (and at the beginning and end of the rollout)
+        :param rollout_buffer: Buffer to fill with rollouts
+        :param n_rollout_steps: Number of experiences to collect per environment
+        :return: True if function returned with at least `n_rollout_steps`
+            collected, False if callback terminated rollout prematurely.
+        """
+        assert self._last_obs is not None, "No previous observation was provided"
+        # Switch to eval mode (this affects batch norm / dropout)
+        self.policy.set_training_mode(False)
+
+        n_steps = 0
+        rollout_buffer.reset()
+        # Sample new weights for the state dependent exploration
+        if self.use_sde:
+            self.policy.reset_noise(env.num_envs)
+
+        callback.on_rollout_start()
+
+        while n_steps < n_rollout_steps:
+            if self.use_sde and self.sde_sample_freq > 0 and n_steps % self.sde_sample_freq == 0:
+                # Sample a new noise matrix
+                self.policy.reset_noise(env.num_envs)
+
+            with th.no_grad():
+                # Convert to pytorch tensor or to TensorDict
+                obs_tensor = obs_as_tensor(self._last_obs, self.device)
+                actions, values, log_probs = self.policy(obs_tensor)
+            actions = actions.cpu().numpy()
+
+            # Rescale and perform action
+            clipped_actions = actions
+            # Clip the actions to avoid out of bound error
+            if isinstance(self.action_space, gym.spaces.Box):
+                clipped_actions = np.clip(
+                    actions, self.action_space.low, self.action_space.high)
+
+            new_obs, rewards, dones, infos = env.step(clipped_actions)
+
+            self.num_timesteps += env.num_envs
+
+            # Give access to local variables
+            callback.update_locals(locals())
+            if callback.on_step() is False:
+                return False
+
+            self._update_info_buffer(infos)
+            n_steps += 1
+
+            if isinstance(self.action_space, gym.spaces.Discrete):
+                # Reshape in case of discrete action
+                actions = actions.reshape(-1, 1)
+
+            # Handle timeout by bootstrapping with the value function
+            # see GitHub issue #633
+            for idx, done in enumerate(dones):
+                if (
+                    done
+                    and infos[idx].get("terminal_observation") is not None
+                    and infos[idx].get("TimeLimit.truncated", False)
+                ):
+                    terminal_obs = self.policy.obs_to_tensor(
+                        infos[idx]["terminal_observation"])[0]
+                    with th.no_grad():
+                        terminal_value = self.policy.predict_values(terminal_obs)[0]
+                    rewards[idx] += self.gamma * terminal_value
+
+            rollout_buffer.add(self._last_obs, actions, rewards,
+                               self._last_episode_starts, values, log_probs)
+            self._last_obs = new_obs
+            self._last_episode_starts = dones
+
+        with th.no_grad():
+            # Compute value for the last timestep
+            values = self.policy.predict_values(
+                obs_as_tensor(new_obs, self.device))
+
+        rollout_buffer.compute_returns_and_advantage(
+            last_values=values, dones=dones)
+
+        callback.on_rollout_end()
+
+        return True
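Note: the comment above says TRL also needs the rollout policy's original mean and std, but this version of collect_rollouts still stores only the standard SB3 fields. Below is a minimal sketch of one way they could be captured, assuming a diagonal Gaussian policy (Box action space). The TRLRolloutBuffer name and the means/stds keywords are illustrative and not part of this commit; only the stable_baselines3 pieces it builds on (RolloutBuffer and the policy's get_distribution method) are real.

import numpy as np
from stable_baselines3.common.buffers import RolloutBuffer


class TRLRolloutBuffer(RolloutBuffer):
    """Hypothetical RolloutBuffer that also stores the rollout policy's mean and std."""

    def reset(self) -> None:
        super().reset()
        # One mean/std vector per step and per env (assumes a diagonal Gaussian policy).
        self.means = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32)
        self.stds = np.zeros((self.buffer_size, self.n_envs, self.action_dim), dtype=np.float32)

    def add(self, *args, means: np.ndarray, stds: np.ndarray, **kwargs) -> None:
        # Record the extra statistics at the current position, then defer to the base class,
        # which writes the usual fields and advances self.pos.
        self.means[self.pos] = np.array(means).copy()
        self.stds[self.pos] = np.array(stds).copy()
        super().add(*args, **kwargs)

Inside collect_rollouts, the statistics could then be queried in the existing th.no_grad() block, e.g. dist = self.policy.get_distribution(obs_tensor).distribution, and passed along as means=dist.mean.cpu().numpy(), stds=dist.stddev.cpu().numpy() in the rollout_buffer.add(...) call.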