diff --git a/alr_envs/examples/examples_movement_primitives.py b/alr_envs/examples/examples_movement_primitives.py
index 85dd6a1..755d912 100644
--- a/alr_envs/examples/examples_movement_primitives.py
+++ b/alr_envs/examples/examples_movement_primitives.py
@@ -36,11 +36,11 @@ def example_mp(env_name="HoleReacherProMP-v0", seed=1, iterations=1, render=True
         env.render(mode=None)
 
         # Now the action space is not the raw action but the parametrization of the trajectory generator,
-        # such as a ProMP
+        # such as a ProMP. You can still use it in the same way, though.
         ac = env.action_space.sample()
         # This executes a full trajectory
         obs, reward, done, info = env.step(ac)
-        # Aggregated reward
+        # Aggregated reward of the trajectory
        rewards += reward
 
         if done:
@@ -62,9 +62,8 @@ def example_custom_mp(env_name="Reacher5dProMP-v0", seed=1, iterations=1, render
     """
 
     # Changing the arguments of the black box env is possible by providing them to gym as with all kwargs.
-    # E.g. here for way to many basis functions
-    # env = alr_envs.make(env_name, seed, basis_generator_kwargs={'num_basis': 1000})
-    env = alr_envs.make(env_name, seed)
+    # E.g. here for adding a lot of basis functions
+    env = alr_envs.make(env_name, seed, basis_generator_kwargs={'num_basis': 1000})
 
     # mp_dict.update({'black_box_kwargs': {'learn_sub_trajectories': True}})
     # mp_dict.update({'black_box_kwargs': {'do_replanning': lambda pos, vel, t: lambda t: t % 100}})
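For context, here is the usage pattern this patch documents, condensed into a minimal sketch. The env id, the make(env_name, seed, **kwargs) call, and the 'num_basis' kwarg are taken from the diff; the reset call and the size of the basis are assumptions for illustration, following standard gym usage:

    import alr_envs

    # Env id and kwargs mirror the patched example above; num_basis=1000 is only
    # there to demonstrate that trajectory generator arguments can be overridden.
    env = alr_envs.make("Reacher5dProMP-v0", 1, basis_generator_kwargs={'num_basis': 1000})

    obs = env.reset()
    # The action is the trajectory generator's parametrization (e.g. ProMP weights),
    # so a single step() executes a full trajectory and returns its aggregated reward.
    ac = env.action_space.sample()
    obs, reward, done, info = env.step(ac)

Note that the step call returns the old gym 4-tuple, matching the code shown in the diff.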