From 03b930d7dd4db80a139cffbf911da29f0f32c93e Mon Sep 17 00:00:00 2001
From: Fabian
Date: Tue, 17 Jan 2023 09:50:55 +0100
Subject: [PATCH] updated for new mp-pytorch version

---
 fancy_gym/black_box/black_box_wrapper.py           | 4 ++--
 fancy_gym/examples/examples_movement_primitives.py | 4 ++--
 fancy_gym/examples/examples_open_ai.py             | 2 +-
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/fancy_gym/black_box/black_box_wrapper.py b/fancy_gym/black_box/black_box_wrapper.py
index 66c5f3e..b92c653 100644
--- a/fancy_gym/black_box/black_box_wrapper.py
+++ b/fancy_gym/black_box/black_box_wrapper.py
@@ -94,14 +94,14 @@ class BlackBoxWrapper(gym.ObservationWrapper):
 
         clipped_params = np.clip(action, self.traj_gen_action_space.low, self.traj_gen_action_space.high)
         self.traj_gen.set_params(clipped_params)
-        bc_time = np.array(0 if not self.do_replanning else self.current_traj_steps * self.dt)
+        init_time = np.array(0 if not self.do_replanning else self.current_traj_steps * self.dt)
 
         # TODO we could think about initializing with the previous desired value in order to have a smooth transition
         #  at least from the planning point of view.
         condition_pos = self.condition_pos if self.condition_pos is not None else self.current_pos
         condition_vel = self.condition_vel if self.condition_vel is not None else self.current_vel
 
-        self.traj_gen.set_boundary_conditions(bc_time, condition_pos, condition_vel)
+        self.traj_gen.set_initial_conditions(init_time, condition_pos, condition_vel)
         self.traj_gen.set_duration(duration, self.dt)
         # traj_dict = self.traj_gen.get_trajs(get_pos=True, get_vel=True)
         position = get_numpy(self.traj_gen.get_traj_pos())
diff --git a/fancy_gym/examples/examples_movement_primitives.py b/fancy_gym/examples/examples_movement_primitives.py
index 445b8b9..745e4e8 100644
--- a/fancy_gym/examples/examples_movement_primitives.py
+++ b/fancy_gym/examples/examples_movement_primitives.py
@@ -33,7 +33,7 @@ def example_mp(env_name="HoleReacherProMP-v0", seed=1, iterations=1, render=True
             # Just make sure the correct mode is set before executing the step.
             env.render(mode="human")
         else:
-            env.render(mode=None)
+            env.render()
 
         # Now the action space is not the raw action but the parametrization of the trajectory generator,
         # such as a ProMP
@@ -155,7 +155,7 @@ def example_fully_custom_mp(seed=1, iterations=1, render=True):
 
 
 if __name__ == '__main__':
-    render = True
+    render = False
 
     # DMP
     example_mp("HoleReacherDMP-v0", seed=10, iterations=5, render=render)
diff --git a/fancy_gym/examples/examples_open_ai.py b/fancy_gym/examples/examples_open_ai.py
index a4a162d..789271f 100644
--- a/fancy_gym/examples/examples_open_ai.py
+++ b/fancy_gym/examples/examples_open_ai.py
@@ -22,7 +22,7 @@ def example_mp(env_name, seed=1, render=True):
         if render and i % 2 == 0:
             env.render(mode="human")
         else:
-            env.render(mode=None)
+            env.render()
         ac = env.action_space.sample()
         obs, reward, done, info = env.step(ac)
         returns += reward
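
Note on the API rename: the first hunk tracks the new mp_pytorch interface,
where the trajectory generator's set_boundary_conditions(bc_time, ...) was
renamed to set_initial_conditions(init_time, ...). For downstream code that
may run against either mp_pytorch release, a minimal compatibility sketch
(assuming only the two method names visible in this patch) could dispatch on
whichever attribute exists:

    # Sketch: call the initial-condition setter that the installed
    # mp_pytorch version actually provides. traj_gen, init_time,
    # condition_pos and condition_vel are the same objects used in the
    # BlackBoxWrapper hunk above.
    if hasattr(traj_gen, "set_initial_conditions"):
        traj_gen.set_initial_conditions(init_time, condition_pos, condition_vel)
    else:
        # Older mp_pytorch releases used the boundary-condition naming.
        traj_gen.set_boundary_conditions(init_time, condition_pos, condition_vel)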
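
Note on the render calls: both example scripts drop the explicit mode=None
argument, so the non-interactive branch now simply calls env.render() with no
arguments. A small sketch of that toggle (maybe_render is a hypothetical
helper, not part of fancy_gym):

    # Sketch: render interactively only when requested; otherwise use the
    # argument-free call the updated examples rely on.
    def maybe_render(env, human: bool):
        if human:
            env.render(mode="human")
        else:
            env.render()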