diff --git a/alr_envs/__init__.py b/alr_envs/__init__.py
index 144b5f2..0dfc1f5 100644
--- a/alr_envs/__init__.py
+++ b/alr_envs/__init__.py
@@ -573,7 +573,7 @@ register(
     entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
     kwargs={
         "name": "gym.envs.classic_control:MountainCarContinuous-v0",
-        "wrappers": [continuous_mountain_car.PositionalWrapper, continuous_mountain_car.MPWrapper],
+        "wrappers": [continuous_mountain_car.MPWrapper],
         "mp_kwargs": {
             "num_dof": 1,
             "num_basis": 4,
@@ -594,7 +594,7 @@ register(
     entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
     kwargs={
         "name": "gym.envs.mujoco:Reacher-v2",
-        "wrappers": [reacher_v2.PositionalWrapper, reacher_v2.MPWrapper],
+        "wrappers": [reacher_v2.MPWrapper],
         "mp_kwargs": {
             "num_dof": 2,
             "num_basis": 6,
@@ -615,7 +615,7 @@ register(
     entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
     kwargs={
         "name": "gym.envs.robotics:FetchSlideDense-v1",
-        "wrappers": [fetch.PositionalWrapper, fetch.MPWrapper],
+        "wrappers": [fetch.MPWrapper],
         "mp_kwargs": {
             "num_dof": 4,
             "num_basis": 5,
@@ -632,7 +632,7 @@ register(
     entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
     kwargs={
         "name": "gym.envs.robotics:FetchReachDense-v1",
-        "wrappers": [fetch.PositionalWrapper, fetch.MPWrapper],
+        "wrappers": [fetch.MPWrapper],
         "mp_kwargs": {
             "num_dof": 4,
             "num_basis": 5,
diff --git a/alr_envs/open_ai/continuous_mountain_car/__init__.py b/alr_envs/open_ai/continuous_mountain_car/__init__.py
index 4cff6da..36f731d 100644
--- a/alr_envs/open_ai/continuous_mountain_car/__init__.py
+++ b/alr_envs/open_ai/continuous_mountain_car/__init__.py
@@ -1,2 +1 @@
-from alr_envs.open_ai.continuous_mountain_car.positional_wrapper import PositionalWrapper
 from alr_envs.open_ai.continuous_mountain_car.mp_wrapper import MPWrapper
\ No newline at end of file
diff --git a/alr_envs/open_ai/continuous_mountain_car/mp_wrapper.py b/alr_envs/open_ai/continuous_mountain_car/mp_wrapper.py
index 886b1e1..f0bccab 100644
--- a/alr_envs/open_ai/continuous_mountain_car/mp_wrapper.py
+++ b/alr_envs/open_ai/continuous_mountain_car/mp_wrapper.py
@@ -1,12 +1,17 @@
 from typing import Union
 
+import numpy as np
 from mp_env_api.env_wrappers.mp_env_wrapper import MPEnvWrapper
 
 
 class MPWrapper(MPEnvWrapper):
     @property
-    def start_pos(self):
-        raise ValueError("Start position is not available")
+    def current_vel(self) -> Union[float, int, np.ndarray]:
+        return np.array([self.state[1]])
+
+    @property
+    def current_pos(self) -> Union[float, int, np.ndarray]:
+        return np.array([self.state[0]])
 
     @property
     def goal_pos(self):
diff --git a/alr_envs/open_ai/continuous_mountain_car/positional_wrapper.py b/alr_envs/open_ai/continuous_mountain_car/positional_wrapper.py
deleted file mode 100644
index 5b587fa..0000000
--- a/alr_envs/open_ai/continuous_mountain_car/positional_wrapper.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import Union
-import numpy as np
-from mp_env_api.env_wrappers.positional_env_wrapper import PositionalEnvWrapper
-
-
-class PositionalWrapper(PositionalEnvWrapper):
-    @property
-    def current_vel(self) -> Union[float, int, np.ndarray]:
-        return np.array([self.state[1]])
-
-    @property
-    def current_pos(self) -> Union[float, int, np.ndarray]:
-        return np.array([self.state[0]])
\ No newline at end of file
diff --git a/alr_envs/open_ai/fetch/__init__.py b/alr_envs/open_ai/fetch/__init__.py
index 4c6d088..2e68176 100644
--- a/alr_envs/open_ai/fetch/__init__.py
+++ b/alr_envs/open_ai/fetch/__init__.py
@@ -1,2 +1 @@
-from alr_envs.open_ai.fetch.positional_wrapper import PositionalWrapper
 from alr_envs.open_ai.fetch.mp_wrapper import MPWrapper
\ No newline at end of file
diff --git a/alr_envs/open_ai/fetch/mp_wrapper.py b/alr_envs/open_ai/fetch/mp_wrapper.py
index 2ac7b59..acb07a3 100644
--- a/alr_envs/open_ai/fetch/mp_wrapper.py
+++ b/alr_envs/open_ai/fetch/mp_wrapper.py
@@ -1,13 +1,17 @@
 from typing import Union
 
-from gym import spaces
+import numpy as np
 from mp_env_api.env_wrappers.mp_env_wrapper import MPEnvWrapper
 
 
 class MPWrapper(MPEnvWrapper):
     @property
-    def start_pos(self):
-        return self.initial_gripper_xpos
+    def current_vel(self) -> Union[float, int, np.ndarray]:
+        return self.unwrapped._get_obs()["observation"][-5:-1]
+
+    @property
+    def current_pos(self) -> Union[float, int, np.ndarray]:
+        return self.unwrapped._get_obs()["observation"][:4]
 
     @property
     def goal_pos(self):
diff --git a/alr_envs/open_ai/fetch/positional_wrapper.py b/alr_envs/open_ai/fetch/positional_wrapper.py
deleted file mode 100644
index 9c6dcf2..0000000
--- a/alr_envs/open_ai/fetch/positional_wrapper.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import Union
-import numpy as np
-from mp_env_api.env_wrappers.positional_env_wrapper import PositionalEnvWrapper
-
-
-class PositionalWrapper(PositionalEnvWrapper):
-    @property
-    def current_vel(self) -> Union[float, int, np.ndarray]:
-        return self.unwrapped._get_obs()["observation"][-5:-1]
-
-    @property
-    def current_pos(self) -> Union[float, int, np.ndarray]:
-        return self.unwrapped._get_obs()["observation"][:4]
\ No newline at end of file
diff --git a/alr_envs/open_ai/reacher_v2/__init__.py b/alr_envs/open_ai/reacher_v2/__init__.py
index a0acbea..48a5615 100644
--- a/alr_envs/open_ai/reacher_v2/__init__.py
+++ b/alr_envs/open_ai/reacher_v2/__init__.py
@@ -1,2 +1 @@
-from alr_envs.open_ai.reacher_v2.positional_wrapper import PositionalWrapper
 from alr_envs.open_ai.reacher_v2.mp_wrapper import MPWrapper
\ No newline at end of file
diff --git a/alr_envs/open_ai/reacher_v2/mp_wrapper.py b/alr_envs/open_ai/reacher_v2/mp_wrapper.py
index be67a35..7636f50 100644
--- a/alr_envs/open_ai/reacher_v2/mp_wrapper.py
+++ b/alr_envs/open_ai/reacher_v2/mp_wrapper.py
@@ -1,13 +1,18 @@
 from typing import Union
 
+import numpy as np
 
 from mp_env_api.env_wrappers.mp_env_wrapper import MPEnvWrapper
 
 
 class MPWrapper(MPEnvWrapper):
     @property
-    def start_pos(self):
-        raise ValueError("Start position is not available")
+    def current_vel(self) -> Union[float, int, np.ndarray]:
+        return self.sim.data.qvel[:2]
+
+    @property
+    def current_pos(self) -> Union[float, int, np.ndarray]:
+        return self.sim.data.qpos[:2]
 
     @property
     def goal_pos(self):
diff --git a/alr_envs/open_ai/reacher_v2/positional_wrapper.py b/alr_envs/open_ai/reacher_v2/positional_wrapper.py
deleted file mode 100644
index 0fc622b..0000000
--- a/alr_envs/open_ai/reacher_v2/positional_wrapper.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from typing import Union
-import numpy as np
-from mp_env_api.env_wrappers.positional_env_wrapper import PositionalEnvWrapper
-
-
-class PositionalWrapper(PositionalEnvWrapper):
-    @property
-    def current_vel(self) -> Union[float, int, np.ndarray]:
-        return self.sim.data.qvel[:2]
-
-    @property
-    def current_pos(self) -> Union[float, int, np.ndarray]:
-        return self.sim.data.qpos[:2]
\ No newline at end of file
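
Note: this patch folds each environment's `PositionalWrapper` (the `current_pos`/`current_vel` properties) into its `MPWrapper`, so every `wrappers` list in the registrations shrinks from two entries to one. For reference, below is a minimal sketch of a registration under the new single-wrapper convention, assuming `register` comes from `gym.envs.registration` as elsewhere in `alr_envs/__init__.py`. The `id` is hypothetical (the real ids sit outside the hunks above); the remaining values mirror the Reacher-v2 registration in this diff.

```python
from gym.envs.registration import register

from alr_envs.open_ai import reacher_v2

register(
    id='ReacherDetPMP-v2',  # hypothetical id, not taken from this diff
    entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
    kwargs={
        "name": "gym.envs.mujoco:Reacher-v2",
        # A single wrapper now suffices: MPWrapper itself exposes
        # current_pos/current_vel, which PositionalWrapper used to supply.
        "wrappers": [reacher_v2.MPWrapper],
        "mp_kwargs": {
            "num_dof": 2,
            "num_basis": 6,
            # further mp_kwargs omitted; the hunks above are truncated here
        },
    },
)
```

Merging the two wrapper classes removes one layer from every environment's wrapper stack and keeps each environment's motion-primitive interface in a single file.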