From 7f58093c5ec0df4d70663f2e87f1c3a02d47e6b1 Mon Sep 17 00:00:00 2001
From: Dominik Roth <dominik.roth.dev@gmail.com>
Date: Sun, 17 Sep 2023 18:50:21 +0200
Subject: [PATCH] Fix all examples for the gymnasium-based API
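
Port all examples from the removed fancy_gym.make(env_id, seed) helper to the
plain gymnasium interface: environments are created via gym.make with the new
namespaced ids (fancy/, dm_control/, metaworld/, gym_ProMP/ and the
fancy_DMP/fancy_ProMP/fancy_ProDMP variants), seeding moves from make to
env.reset(seed=...), and env.step now returns the gymnasium 5-tuple
(obs, reward, terminated, truncated, info).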

---
 fancy_gym/examples/example_replanning_envs.py | 18 ++++++-----
 fancy_gym/examples/examples_dmc.py            | 23 ++++++-------
 fancy_gym/examples/examples_general.py        | 15 ++++----
 fancy_gym/examples/examples_metaworld.py      | 21 ++++++-----
 .../examples/examples_movement_primitives.py  | 44 +++++++++----------
 fancy_gym/examples/examples_open_ai.py        |  9 +++--
 fancy_gym/examples/mp_params_tuning.py        | 10 ++++--
 fancy_gym/examples/pd_control_gain_tuning.py  | 11 ++++---
 8 files changed, 78 insertions(+), 73 deletions(-)
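
Note: a minimal sketch of the migration pattern applied throughout (the env id
is illustrative, taken from the examples below; reset/step signatures follow
the gymnasium API):

    import gymnasium as gym
    import fancy_gym  # noqa: F401, the import registers all fancy_gym ids

    env = gym.make("fancy_ProMP/HoleReacher-v0")
    obs, info = env.reset(seed=1)
    obs, reward, terminated, truncated, info = env.step(env.action_space.sample())
    if terminated or truncated:
        obs, info = env.reset()
    env.close()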

diff --git a/fancy_gym/examples/example_replanning_envs.py b/fancy_gym/examples/example_replanning_envs.py
index 05be6ad..2c3c3f4 100644
--- a/fancy_gym/examples/example_replanning_envs.py
+++ b/fancy_gym/examples/example_replanning_envs.py
@@ -1,17 +1,19 @@
+import gymnasium as gym
 import fancy_gym
 
 
-def example_run_replanning_env(env_name="BoxPushingDenseReplanProDMP-v0", seed=1, iterations=1, render=False):
-    env = fancy_gym.make(env_name, seed=seed)
-    env.reset()
+def example_run_replanning_env(env_name="fancy_ProDMP/BoxPushingDenseReplan-v0", seed=1, iterations=1, render=False):
+    env = gym.make(env_name, render_mode='human' if render else None)
+    env.reset(seed=seed)
     for i in range(iterations):
         done = False
         while done is False:
             ac = env.action_space.sample()
-            obs, reward, done, info = env.step(ac)
+            obs, reward, terminated, truncated, info = env.step(ac)
+            done = terminated or truncated
             if render:
-                env.render(mode="human")
+                env.render()
             if done:
                 env.reset()
     env.close()
     del env
@@ -48,8 +50,8 @@ def example_custom_replanning_envs(seed=0, iteration=100, render=True):
 
     for i in range(iteration):
         ac = env.action_space.sample()
-        obs, reward, done, info = env.step(ac)
-        if done:
+        obs, reward, terminated, truncated, info = env.step(ac)
+        if terminated or truncated:
             env.reset()
 
     env.close()
@@ -58,7 +60,7 @@ def example_custom_replanning_envs(seed=0, iteration=100, render=True):
 
 if __name__ == "__main__":
     # run a registered replanning environment
-    example_run_replanning_env(env_name="BoxPushingDenseReplanProDMP-v0", seed=1, iterations=1, render=False)
+    example_run_replanning_env(env_name="fancy_ProDMP/BoxPushingDenseReplan-v0", seed=1, iterations=1, render=False)
 
     # run a custom replanning environment
     example_custom_replanning_envs(seed=0, iteration=8, render=True)
diff --git a/fancy_gym/examples/examples_dmc.py b/fancy_gym/examples/examples_dmc.py
index 243bd70..fbb1473 100644
--- a/fancy_gym/examples/examples_dmc.py
+++ b/fancy_gym/examples/examples_dmc.py
@@ -1,7 +1,8 @@
+import gymnasium as gym
 import fancy_gym
 
 
-def example_dmc(env_id="dmc:fish-swim", seed=1, iterations=1000, render=True):
+def example_dmc(env_id="dm_control/fish-swim", seed=1, iterations=1000, render=True):
     """
     Example for running a DMC based env in the step based setting.
     The env_id has to be specified as `domain_name:task_name` or
@@ -16,9 +17,9 @@ def example_dmc(env_id="dmc:fish-swim", seed=1, iterations=1000, render=True):
     Returns:
 
     """
-    env = fancy_gym.make(env_id, seed)
+    env = gym.make(env_id)
     rewards = 0
-    obs = env.reset()
+    obs, info = env.reset(seed=seed)
     print("observation shape:", env.observation_space.shape)
     print("action shape:", env.action_space.shape)
 
@@ -56,7 +57,7 @@ def example_custom_dmc_and_mp(seed=1, iterations=1, render=True):
     """
 
     # Base DMC name, according to structure of above example
-    base_env_id = "dmc:ball_in_cup-catch"
+    base_env_id = "dm_control/ball_in_cup-catch"
 
     # Replace this wrapper with the custom wrapper for your environment by inheriting from the RawInterfaceWrapper.
     # You can also add other gym.Wrappers in case they are needed.
@@ -65,8 +66,8 @@ def example_custom_dmc_and_mp(seed=1, iterations=1, render=True):
     trajectory_generator_kwargs = {'trajectory_generator_type': 'promp'}
     phase_generator_kwargs = {'phase_generator_type': 'linear'}
     controller_kwargs = {'controller_type': 'motor',
-                          "p_gains": 1.0,
-                          "d_gains": 0.1,}
+                         "p_gains": 1.0,
+                         "d_gains": 0.1, }
     basis_generator_kwargs = {'basis_generator_type': 'zero_rbf',
                               'num_basis': 5,
                               'num_basis_zero_start': 1
@@ -123,14 +124,14 @@ if __name__ == '__main__':
     render = True
 
     # # Standard DMC Suite tasks
-    example_dmc("dmc:fish-swim", seed=10, iterations=1000, render=render)
+    example_dmc("dm_control/fish-swim", seed=10, iterations=1000, render=render)
     #
     # # Manipulation tasks
     # # Disclaimer: The vision versions are currently not integrated and yield an error
-    example_dmc("dmc:manipulation-reach_site_features", seed=10, iterations=250, render=render)
+    example_dmc("dm_control/manipulation-reach_site_features", seed=10, iterations=250, render=render)
     #
     # # Gym + DMC hybrid task provided in the MP framework
-    example_dmc("dmc_ball_in_cup-catch_promp-v0", seed=10, iterations=1, render=render)
+    example_dmc("dm_control_ProMP/ball_in_cup-catch-v0", seed=10, iterations=1, render=render)
 
-    # Custom DMC task # Different seed, because the episode is longer for this example and the name+seed combo is
-    # already registered above
+    # Custom DMC task
+    # Different seed, because the episode is longer for this example
diff --git a/fancy_gym/examples/examples_general.py b/fancy_gym/examples/examples_general.py
index 383c4cf..e341bfe 100644
--- a/fancy_gym/examples/examples_general.py
+++ b/fancy_gym/examples/examples_general.py
@@ -21,9 +21,9 @@ def example_general(env_id="Pendulum-v1", seed=1, iterations=1000, render=True):
 
     """
 
-    env = fancy_gym.make(env_id, seed)
+    env = gym.make(env_id)
     rewards = 0
-    obs = env.reset()
+    obs, info = env.reset(seed=seed)
     print("Observation shape: ", env.observation_space.shape)
     print("Action shape: ", env.action_space.shape)
 
@@ -41,7 +41,7 @@ def example_general(env_id="Pendulum-v1", seed=1, iterations=1000, render=True):
-            obs = env.reset()
+            obs, info = env.reset()
 
 
-def example_async(env_id="HoleReacher-v0", n_cpu=4, seed=int('533D', 16), n_samples=800):
+def example_async(env_id="fancy/HoleReacher-v0", n_cpu=4, seed=int('533D', 16), n_samples=800):
     """
     Example for running any env in a vectorized multiprocessing setting to generate more samples faster.
     This also includes DMC and DMP environments when leveraging our custom make_env function.
@@ -93,11 +93,10 @@ if __name__ == '__main__':
     example_general("Pendulum-v1", seed=10, iterations=200, render=render)
 
     # Mujoco task from framework
-    example_general("Reacher5d-v0", seed=10, iterations=200, render=render)
+    example_general("fancy/Reacher5d-v0", seed=10, iterations=200, render=render)
 
     # # OpenAI Mujoco task
     example_general("HalfCheetah-v2", seed=10, render=render)
 
     # Vectorized multiprocessing environments
     # example_async(env_id="HoleReacher-v0", n_cpu=2, seed=int('533D', 16), n_samples=2 * 200)
-
diff --git a/fancy_gym/examples/examples_metaworld.py b/fancy_gym/examples/examples_metaworld.py
index 0c38bff..7919b71 100644
--- a/fancy_gym/examples/examples_metaworld.py
+++ b/fancy_gym/examples/examples_metaworld.py
@@ -1,7 +1,8 @@
+import gymnasium as gym
 import fancy_gym
 
 
-def example_dmc(env_id="fish-swim", seed=1, iterations=1000, render=True):
+def example_meta(env_id="fish-swim", seed=1, iterations=1000, render=True):
     """
     Example for running a MetaWorld based env in the step based setting.
-    The env_id has to be specified as `task_name-v2`. V1 versions are not supported and we always
+    The env_id has to be specified as `metaworld/task_name-v2`. V1 versions are not supported and we always
@@ -17,9 +18,9 @@ def example_dmc(env_id="fish-swim", seed=1, iterations=1000, render=True):
     Returns:
 
     """
-    env = fancy_gym.make(env_id, seed)
+    env = gym.make(env_id)
     rewards = 0
-    obs = env.reset()
+    obs, info = env.reset(seed=seed)
     print("observation shape:", env.observation_space.shape)
     print("action shape:", env.action_space.shape)
 
@@ -40,7 +41,7 @@ def example_dmc(env_id="fish-swim", seed=1, iterations=1000, render=True):
     del env
 
 
-def example_custom_dmc_and_mp(seed=1, iterations=1, render=True):
+def example_custom_meta_and_mp(seed=1, iterations=1, render=True):
     """
     Example for running a custom movement primitive based environments.
     Our already registered environments follow the same structure.
@@ -58,7 +59,7 @@ def example_custom_dmc_and_mp(seed=1, iterations=1, render=True):
     """
 
     # Base MetaWorld name, according to structure of above example
-    base_env_id = "metaworld:button-press-v2"
+    base_env_id = "metaworld/button-press-v2"
 
     # Replace this wrapper with the custom wrapper for your environment by inheriting from the RawInterfaceWrapper.
     # You can also add other gym.Wrappers in case they are needed.
@@ -124,10 +125,10 @@ if __name__ == '__main__':
     render = False
 
     # # Standard Meta world tasks
-    example_dmc("metaworld:button-press-v2", seed=10, iterations=500, render=render)
+    example_meta("metaworld/button-press-v2", seed=10, iterations=500, render=render)
 
-    # # MP + MetaWorld hybrid task provided in the our framework
+    # # MP + MetaWorld hybrid task provided in our framework
-    example_dmc("ButtonPressProMP-v2", seed=10, iterations=1, render=render)
+    example_meta("metaworld_ProMP/ButtonPress-v2", seed=10, iterations=1, render=render)
     #
     # # Custom MetaWorld task
-    example_custom_dmc_and_mp(seed=10, iterations=1, render=render)
+    example_custom_meta_and_mp(seed=10, iterations=1, render=render)
diff --git a/fancy_gym/examples/examples_movement_primitives.py b/fancy_gym/examples/examples_movement_primitives.py
index 5913774..317a103 100644
--- a/fancy_gym/examples/examples_movement_primitives.py
+++ b/fancy_gym/examples/examples_movement_primitives.py
@@ -1,7 +1,8 @@
+import gymnasium as gym
 import fancy_gym
 
 
-def example_mp(env_name="HoleReacherProMP-v0", seed=1, iterations=1, render=True):
+def example_mp(env_name="fancy_ProMP/HoleReacher-v0", seed=1, iterations=1, render=True):
     """
     Example for running a black box based environment, which is already registered
     Args:
@@ -15,11 +16,11 @@ def example_mp(env_name="HoleReacherProMP-v0", seed=1, iterations=1, render=True
     """
-    # Equivalent to gym, we have a make function which can be used to create environments.
-    # It takes care of seeding and enables the use of a variety of external environments using the gym interface.
-    env = fancy_gym.make(env_name, seed)
+    # Environments are created as in any other gymnasium setting; importing fancy_gym registers them.
+    # Seeding is handled through env.reset(seed=...), as for every gymnasium environment.
+    env = gym.make(env_name)
 
     returns = 0
     # env.render(mode=None)
-    obs = env.reset()
+    obs, info = env.reset(seed=seed)
 
     # number of samples/full trajectories (multiple environment steps)
     for i in range(iterations):
@@ -50,7 +51,7 @@ def example_mp(env_name="HoleReacherProMP-v0", seed=1, iterations=1, render=True
-            obs = env.reset()
+            obs, info = env.reset()
 
 
-def example_custom_mp(env_name="Reacher5dProMP-v0", seed=1, iterations=1, render=True):
+def example_custom_mp(env_name="fancy_ProMP/Reacher5d-v0", seed=1, iterations=1, render=True):
     """
     Example for running a movement primitive based environment, which is already registered
     Args:
@@ -62,12 +63,9 @@ def example_custom_mp(env_name="Reacher5dProMP-v0", seed=1, iterations=1, render
     Returns:
 
     """
-    # Changing the arguments of the black box env is possible by providing them to gym as with all kwargs.
-    # E.g. here for way to many basis functions
-    env = fancy_gym.make(env_name, seed, basis_generator_kwargs={'num_basis': 1000})
-    # env = fancy_gym.make(env_name, seed)
-    # mp_dict.update({'black_box_kwargs': {'learn_sub_trajectories': True}})
-    # mp_dict.update({'black_box_kwargs': {'do_replanning': lambda pos, vel, t: lambda t: t % 100}})
+    # Changing the arguments of the black box env is possible by passing mp_config_override to gym.make.
+    # E.g. here for far too many basis functions
+    env = gym.make(env_name, mp_config_override={'basis_generator_kwargs': {'num_basis': 1000}})
 
     returns = 0
-    obs = env.reset()
+    obs, info = env.reset(seed=seed)
@@ -106,7 +104,7 @@ def example_fully_custom_mp(seed=1, iterations=1, render=True):
 
     """
 
-    base_env_id = "Reacher5d-v0"
+    base_env_id = "fancy/Reacher5d-v0"
 
     # Replace this wrapper with the custom wrapper for your environment by inheriting from the RawInterfaceWrapper.
     # You can also add other gym.Wrappers in case they are needed.
@@ -157,20 +155,20 @@ def example_fully_custom_mp(seed=1, iterations=1, render=True):
 if __name__ == '__main__':
     render = False
     # DMP
-    example_mp("HoleReacherDMP-v0", seed=10, iterations=5, render=render)
+    example_mp("fancy_DMP/HoleReacher-v0", seed=10, iterations=5, render=render)
 
     # ProMP
-    example_mp("HoleReacherProMP-v0", seed=10, iterations=5, render=render)
-    example_mp("BoxPushingTemporalSparseProMP-v0", seed=10, iterations=1, render=render)
-    example_mp("TableTennis4DProMP-v0", seed=10, iterations=20, render=render)
+    example_mp("fancy_ProMP/HoleReacher-v0", seed=10, iterations=5, render=render)
+    example_mp("fancy_ProMP/BoxPushingTemporalSparse-v0", seed=10, iterations=1, render=render)
+    example_mp("fancy_ProMP/TableTennis4D-v0", seed=10, iterations=20, render=render)
 
     # ProDMP with Replanning
-    example_mp("BoxPushingDenseReplanProDMP-v0", seed=10, iterations=4, render=render)
-    example_mp("TableTennis4DReplanProDMP-v0", seed=10, iterations=20, render=render)
-    example_mp("TableTennisWindReplanProDMP-v0", seed=10, iterations=20, render=render)
+    example_mp("fancy_ProDMP/BoxPushingDenseReplan-v0", seed=10, iterations=4, render=render)
+    example_mp("fancy_ProDMP/TableTennis4DReplan-v0", seed=10, iterations=20, render=render)
+    example_mp("fancy_ProDMP/TableTennisWindReplan-v0", seed=10, iterations=20, render=render)
 
     # Altered basis functions
-    obs1 = example_custom_mp("Reacher5dProMP-v0", seed=10, iterations=1, render=render)
+    obs1 = example_custom_mp("fancy_ProMP/Reacher5d-v0", seed=10, iterations=1, render=render)
 
     # Custom MP
     example_fully_custom_mp(seed=10, iterations=1, render=render)
diff --git a/fancy_gym/examples/examples_open_ai.py b/fancy_gym/examples/examples_open_ai.py
index a79a44b..07f1719 100644
--- a/fancy_gym/examples/examples_open_ai.py
+++ b/fancy_gym/examples/examples_open_ai.py
@@ -1,3 +1,4 @@
+import gymnasium as gym
 import fancy_gym
 
 
@@ -12,11 +13,10 @@ def example_mp(env_name, seed=1, render=True):
     Returns:
 
     """
-    # While in this case gym.make() is possible to use as well, we recommend our custom make env function.
-    env = fancy_gym.make(env_name, seed)
+    env = gym.make(env_name)
 
     returns = 0
-    obs = env.reset()
+    obs, info = env.reset(seed=seed)
     # number of samples/full trajectories (multiple environment steps)
     for i in range(10):
         if render and i % 2 == 0:
@@ -33,5 +33,4 @@ def example_mp(env_name, seed=1, render=True):
 
 
 if __name__ == '__main__':
-    example_mp("ReacherProMP-v2")
-
+    example_mp("gym_ProMP/Reacher-v2")
diff --git a/fancy_gym/examples/mp_params_tuning.py b/fancy_gym/examples/mp_params_tuning.py
index 644d86b..71a579a 100644
--- a/fancy_gym/examples/mp_params_tuning.py
+++ b/fancy_gym/examples/mp_params_tuning.py
@@ -1,10 +1,14 @@
+import gymnasium as gym
 import fancy_gym
 
+
 def compare_bases_shape(env1_id, env2_id):
-    env1 = fancy_gym.make(env1_id, seed=0)
+    env1 = gym.make(env1_id)
     env1.traj_gen.show_scaled_basis(plot=True)
-    env2 = fancy_gym.make(env2_id, seed=0)
+    env2 = gym.make(env2_id)
     env2.traj_gen.show_scaled_basis(plot=True)
     return
+
+
 if __name__ == '__main__':
-    compare_bases_shape("TableTennis4DProDMP-v0", "TableTennis4DProMP-v0")
\ No newline at end of file
+    compare_bases_shape("fancy_ProDMP/TableTennis4D-v0", "fancy_ProMP/TableTennis4D-v0")
diff --git a/fancy_gym/examples/pd_control_gain_tuning.py b/fancy_gym/examples/pd_control_gain_tuning.py
index 4cfae39..3f8634c 100644
--- a/fancy_gym/examples/pd_control_gain_tuning.py
+++ b/fancy_gym/examples/pd_control_gain_tuning.py
@@ -3,19 +3,20 @@ from collections import OrderedDict
 import numpy as np
 from matplotlib import pyplot as plt
 
+import gymnasium as gym
 import fancy_gym
 
-# This might work for some environments, however, please verify either way the correct trajectory information
-# for your environment are extracted below
+# This might work for some environments; however, please verify either way that the correct
+# trajectory information for your environment is extracted below.
 SEED = 1
 
-env_id = "Reacher5dProMP-v0"
+env_id = "fancy_ProMP/Reacher5d-v0"
 
-env = fancy_gym.make(env_id, seed=SEED, controller_kwargs={'p_gains': 0.05, 'd_gains': 0.05}).env
+env = gym.make(env_id, mp_config_override={'controller_kwargs': {'p_gains': 0.05, 'd_gains': 0.05}}).env
 env.action_space.seed(SEED)
 
 # Plot difference between real trajectory and target MP trajectory
-env.reset()
+env.reset(seed=SEED)
 w = env.action_space.sample()
 pos, vel = env.get_trajectory(w)