diff --git a/.gitignore b/.gitignore
index 91a91dd..e6ea8d1 100644
--- a/.gitignore
+++ b/.gitignore
@@ -106,6 +106,7 @@ venv.bak/
 # pycharm
 .DS_Store
 .idea
+.vscode
 
 #configs
 /configs/db.cfg
diff --git a/fancy_gym/black_box/black_box_wrapper.py b/fancy_gym/black_box/black_box_wrapper.py
index 131c229..6da24c7 100644
--- a/fancy_gym/black_box/black_box_wrapper.py
+++ b/fancy_gym/black_box/black_box_wrapper.py
@@ -56,6 +56,14 @@ class BlackBoxWrapper(gym.ObservationWrapper):
         # self.traj_gen.set_mp_times(self.time_steps)
         self.traj_gen.set_duration(self.duration, self.dt)
 
+        # tau/delay bounds, taken from the phase generator if it provides them
+        self.tau_bound = [-np.inf, np.inf]
+        self.delay_bound = [-np.inf, np.inf]
+        if hasattr(self.traj_gen.phase_gn, "tau_bound"):
+            self.tau_bound = self.traj_gen.phase_gn.tau_bound
+        if hasattr(self.traj_gen.phase_gn, "delay_bound"):
+            self.delay_bound = self.traj_gen.phase_gn.delay_bound
+
         # reward computation
         self.reward_aggregation = reward_aggregation
 
@@ -144,7 +152,8 @@ class BlackBoxWrapper(gym.ObservationWrapper):
 
         position, velocity = self.get_trajectory(action)
         position, velocity = self.env.set_episode_arguments(action, position, velocity)
-        traj_is_valid, position, velocity = self.env.preprocessing_and_validity_callback(action, position, velocity)
+        traj_is_valid, position, velocity = self.env.preprocessing_and_validity_callback(action, position, velocity,
+                                                                                         self.tau_bound, self.delay_bound)
 
         trajectory_length = len(position)
         rewards = np.zeros(shape=(trajectory_length,))
@@ -159,7 +168,7 @@ class BlackBoxWrapper(gym.ObservationWrapper):
 
         if not traj_is_valid:
             obs, trajectory_return, terminated, truncated, infos = self.env.invalid_traj_callback(action, position, velocity,
-                                                                                                  self.return_context_observation)
+                                                                                                  self.return_context_observation, self.tau_bound, self.delay_bound)
             return self.observation(obs), trajectory_return, terminated, truncated, infos
 
         self.plan_steps += 1
diff --git a/fancy_gym/black_box/raw_interface_wrapper.py b/fancy_gym/black_box/raw_interface_wrapper.py
index 78f1f8c..6dba765 100644
--- a/fancy_gym/black_box/raw_interface_wrapper.py
+++ b/fancy_gym/black_box/raw_interface_wrapper.py
@@ -52,7 +52,8 @@ class RawInterfaceWrapper(gym.Wrapper):
         """
         return self.env.dt
 
-    def preprocessing_and_validity_callback(self, action: np.ndarray, pos_traj: np.ndarray, vel_traj: np.ndarray) \
+    def preprocessing_and_validity_callback(self, action: np.ndarray, pos_traj: np.ndarray, vel_traj: np.ndarray,
+                                            tau_bound: list = None, delay_bound: list = None) \
             -> Tuple[bool, np.ndarray, np.ndarray]:
         """
         Used to preprocess the action and check if the desired trajectory is valid.
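As a hedged illustration of the extended callback interface above (not part of this patch): an environment-specific wrapper could implement the new signature roughly as sketched below. The class name MyMPWrapper and the bound-checking logic are hypothetical; the only assumption taken from this patch is that action[0] carries the duration scaling (tau) and action[1] the start delay, as the table tennis wrapper further down interprets them.

    import numpy as np
    from typing import Tuple
    from fancy_gym.black_box.raw_interface_wrapper import RawInterfaceWrapper

    class MyMPWrapper(RawInterfaceWrapper):
        def preprocessing_and_validity_callback(self, action: np.ndarray, pos_traj: np.ndarray,
                                                vel_traj: np.ndarray, tau_bound: list = None,
                                                delay_bound: list = None) -> Tuple[bool, np.ndarray, np.ndarray]:
            # Assumes action[0] is tau and action[1] is the delay, mirroring the table
            # tennis wrapper; bounds of None mean "no restriction".
            tau_ok = tau_bound is None or tau_bound[0] <= action[0] <= tau_bound[1]
            delay_ok = delay_bound is None or delay_bound[0] <= action[1] <= delay_bound[1]
            return bool(tau_ok and delay_ok), pos_traj, vel_traj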
@@ -61,6 +62,8 @@ class RawInterfaceWrapper(gym.Wrapper):
                 specified, else only traj_gen parameters
             pos_traj: a vector instance of the raw position trajectory
             vel_traj: a vector instance of the raw velocity trajectory
+            tau_bound: a list of two elements, the lower and upper bound of the trajectory length scaling factor
+            delay_bound: a list of two elements, the lower and upper bound of the time to wait before execution
         Returns:
             validity flag: bool, True if the raw trajectory is valid, False if not
             pos_traj: a vector instance of the preprocessed position trajectory
@@ -97,7 +100,8 @@ class RawInterfaceWrapper(gym.Wrapper):
         """
         return True
 
-    def invalid_traj_callback(self, action: np.ndarray, pos_traj: np.ndarray, vel_traj: np.ndarray) -> Tuple[np.ndarray, float, bool, dict]:
+    def invalid_traj_callback(self, action: np.ndarray, pos_traj: np.ndarray, vel_traj: np.ndarray,
+                              tau_bound: list, delay_bound: list) -> Tuple[np.ndarray, float, bool, dict]:
         """
         Used to return a artificial return from the env if the desired trajectory is invalid.
         Args:
@@ -105,6 +109,8 @@ class RawInterfaceWrapper(gym.Wrapper):
                 specified, else only traj_gen parameters
             pos_traj: a vector instance of the raw position trajectory
             vel_traj: a vector instance of the raw velocity trajectory
+            tau_bound: a list of two elements, the lower and upper bound of the trajectory length scaling factor
+            delay_bound: a list of two elements, the lower and upper bound of the time to wait before execution
         Returns:
             obs: artificial observation if the trajectory is invalid, by default a zero vector
             reward: artificial reward if the trajectory is invalid, by default 0
diff --git a/fancy_gym/envs/__init__.py b/fancy_gym/envs/__init__.py
index 9a3ccdc..10ed0b0 100644
--- a/fancy_gym/envs/__init__.py
+++ b/fancy_gym/envs/__init__.py
@@ -212,6 +212,12 @@ for reward_type in ["Dense", "TemporalSparse", "TemporalSpatialSparse"]:
         mp_wrapper=mujoco.box_pushing.MPWrapper,
         max_episode_steps=MAX_EPISODE_STEPS_BOX_PUSHING,
     )
+    register(
+        id='BoxPushingRandomInit{}-v0'.format(reward_type),
+        entry_point='fancy_gym.envs.mujoco:BoxPushing{}'.format(reward_type),
+        max_episode_steps=MAX_EPISODE_STEPS_BOX_PUSHING,
+        kwargs={"random_init": True}
+    )
 
     upgrade(
         id='fancy/BoxPushing{}Replan-v0'.format(reward_type),
diff --git a/fancy_gym/envs/mujoco/box_pushing/box_pushing_env.py b/fancy_gym/envs/mujoco/box_pushing/box_pushing_env.py
index 4fafd44..2a90cba 100644
--- a/fancy_gym/envs/mujoco/box_pushing/box_pushing_env.py
+++ b/fancy_gym/envs/mujoco/box_pushing/box_pushing_env.py
@@ -36,7 +36,7 @@ class BoxPushingEnvBase(MujocoEnv, utils.EzPickle):
         "render_fps": 50
     }
 
-    def __init__(self, frame_skip: int = 10):
+    def __init__(self, frame_skip: int = 10, random_init: bool = False):
         utils.EzPickle.__init__(**locals())
         self._steps = 0
         self.init_qpos_box_pushing = np.array([0., 0., 0., -1.5, 0., 1.5, 0., 0., 0., 0.6, 0.45, 0.0, 1., 0., 0., 0.])
@@ -54,6 +54,7 @@ class BoxPushingEnvBase(MujocoEnv, utils.EzPickle):
             low=-np.inf, high=np.inf, shape=(28,), dtype=np.float64
         )
 
+        self.random_init = random_init
         MujocoEnv.__init__(self,
                            model_path=os.path.join(os.path.dirname(__file__), "assets", "box_pushing.xml"),
                            frame_skip=self.frame_skip,
@@ -112,7 +113,7 @@ class BoxPushingEnvBase(MujocoEnv, utils.EzPickle):
     def reset_model(self):
         # rest box to initial position
         self.set_state(self.init_qpos_box_pushing, self.init_qvel_box_pushing)
-        box_init_pos = np.array([0.4, 0.3, -0.01, 0.0, 0.0, 0.0, 1.0])
+        box_init_pos = self.sample_context() if self.random_init else np.array([0.4, 0.3, -0.01, 0.0, 0.0, 0.0, 1.0])
         self.data.joint("box_joint").qpos = box_init_pos
 
         # set target position
@@ -179,6 +180,9 @@ class BoxPushingEnvBase(MujocoEnv, utils.EzPickle):
             penalty -= v_coeff * abs(np.sum(q_dot_error[q_dot_error > 0.]))
         return penalty
 
+    def _get_box_vel(self):
+        return self.data.body("box_0").cvel.copy()
+
     def get_body_jacp(self, name):
         id = mujoco.mj_name2id(self.model, 1, name)
         jacp = np.zeros((3, self.model.nv))
@@ -301,8 +305,8 @@ class BoxPushingEnvBase(MujocoEnv, utils.EzPickle):
 
 class BoxPushingDense(BoxPushingEnvBase):
-    def __init__(self, frame_skip: int = 10):
-        super(BoxPushingDense, self).__init__(frame_skip=frame_skip)
+    def __init__(self, frame_skip: int = 10, random_init: bool = False):
+        super(BoxPushingDense, self).__init__(frame_skip=frame_skip, random_init=random_init)
 
     def _get_reward(self, episode_end, box_pos, box_quat, target_pos, target_quat, rod_tip_pos,
                     rod_quat, qpos, qvel, action):
         joint_penalty = self._joint_limit_violate_penalty(qpos,
@@ -325,14 +329,14 @@ class BoxPushingDense(BoxPushingEnvBase):
 
 
 class BoxPushingTemporalSparse(BoxPushingEnvBase):
-    def __init__(self, frame_skip: int = 10):
-        super(BoxPushingTemporalSparse, self).__init__(frame_skip=frame_skip)
+    def __init__(self, frame_skip: int = 10, random_init: bool = False):
+        super(BoxPushingTemporalSparse, self).__init__(frame_skip=frame_skip, random_init=random_init)
 
     def _get_reward(self, episode_end, box_pos, box_quat, target_pos, target_quat, rod_tip_pos,
                     rod_quat, qpos, qvel, action):
         reward = 0.
         joint_penalty = self._joint_limit_violate_penalty(qpos, qvel, enable_pos_limit=True, enable_vel_limit=True)
-        energy_cost = -0.0005 * np.sum(np.square(action))
+        energy_cost = -0.02 * np.sum(np.square(action))
         tcp_box_dist_reward = -2 * np.clip(np.linalg.norm(box_pos - rod_tip_pos), 0.05, 100)
         reward += joint_penalty + tcp_box_dist_reward + energy_cost
         rod_inclined_angle = rotation_distance(rod_quat, desired_rod_quat)
@@ -348,21 +352,23 @@ class BoxPushingTemporalSparse(BoxPushingEnvBase):
 
         box_goal_pos_dist_reward = -3.5 * box_goal_dist * 100
         box_goal_rot_dist_reward = -rotation_distance(box_quat, target_quat) / np.pi * 100
 
-        reward += box_goal_pos_dist_reward + box_goal_rot_dist_reward
+        ep_end_joint_vel = -50. * np.linalg.norm(qvel)
+
+        reward += box_goal_pos_dist_reward + box_goal_rot_dist_reward + ep_end_joint_vel
 
         return reward
 
 
 class BoxPushingTemporalSpatialSparse(BoxPushingEnvBase):
-    def __init__(self, frame_skip: int = 10):
-        super(BoxPushingTemporalSpatialSparse, self).__init__(frame_skip=frame_skip)
+    def __init__(self, frame_skip: int = 10, random_init: bool = False):
+        super(BoxPushingTemporalSpatialSparse, self).__init__(frame_skip=frame_skip, random_init=random_init)
 
     def _get_reward(self, episode_end, box_pos, box_quat, target_pos, target_quat, rod_tip_pos,
                     rod_quat, qpos, qvel, action):
         reward = 0.
         joint_penalty = self._joint_limit_violate_penalty(qpos, qvel, enable_pos_limit=True, enable_vel_limit=True)
-        energy_cost = -0.0005 * np.sum(np.square(action))
+        energy_cost = -0.02 * np.sum(np.square(action))
         tcp_box_dist_reward = -2 * np.clip(np.linalg.norm(box_pos - rod_tip_pos), 0.05, 100)
         reward += joint_penalty + tcp_box_dist_reward + energy_cost
         rod_inclined_angle = rotation_distance(rod_quat, desired_rod_quat)
@@ -382,3 +388,72 @@ class BoxPushingTemporalSpatialSparse(BoxPushingEnvBase):
         reward += box_goal_pos_dist_reward + box_goal_rot_dist_reward
 
         return reward
+
+
+class BoxPushingTemporalSpatialSparse2(BoxPushingEnvBase):
+
+    def __init__(self, frame_skip: int = 10, random_init: bool = False):
+        super(BoxPushingTemporalSpatialSparse2, self).__init__(frame_skip=frame_skip, random_init=random_init)
+
+    def _get_reward(self, episode_end, box_pos, box_quat, target_pos, target_quat,
+                    rod_tip_pos, rod_quat, qpos, qvel, action):
+        reward = 0.
+        joint_penalty = self._joint_limit_violate_penalty(qpos, qvel, enable_pos_limit=True, enable_vel_limit=True)
+        energy_cost = -0.0005 * np.sum(np.square(action))
+        tcp_box_dist_reward = -2 * np.clip(np.linalg.norm(box_pos - rod_tip_pos), 0.05, 100)
+
+        reward += joint_penalty + energy_cost + tcp_box_dist_reward
+
+        rod_inclined_angle = rotation_distance(rod_quat, desired_rod_quat)
+
+        if rod_inclined_angle > np.pi / 4:
+            reward -= rod_inclined_angle / (np.pi)
+
+        if not episode_end:
+            return reward
+
+        # Force the robot to stop at the end
+        reward += -50. * np.linalg.norm(qvel)
+
+        box_goal_dist = np.linalg.norm(box_pos - target_pos)
+
+        if box_goal_dist < 0.1:
+            box_goal_pos_dist_reward = np.clip(- 350. * box_goal_dist, -200, 0)
+            box_goal_rot_dist_reward = np.clip(- rotation_distance(box_quat, target_quat) / np.pi * 100., -100, 0)
+            reward += box_goal_pos_dist_reward + box_goal_rot_dist_reward
+        else:
+            reward -= 300.
+
+        return reward
+
+
+class BoxPushingNoConstraintSparse(BoxPushingEnvBase):
+    def __init__(self, frame_skip: int = 10, random_init: bool = False):
+        super(BoxPushingNoConstraintSparse, self).__init__(frame_skip=frame_skip, random_init=random_init)
+
+    def _get_reward(self, episode_end, box_pos, box_quat, target_pos, target_quat,
+                    rod_tip_pos, rod_quat, qpos, qvel, action):
+        reward = 0.
+        joint_penalty = self._joint_limit_violate_penalty(qpos, qvel, enable_pos_limit=True, enable_vel_limit=True)
+        energy_cost = -0.0005 * np.sum(np.square(action))
+        reward += joint_penalty + energy_cost
+
+        if not episode_end:
+            return reward
+
+        box_goal_dist = np.linalg.norm(box_pos - target_pos)
+
+        box_goal_pos_dist_reward = -3.5 * box_goal_dist * 100
+        box_goal_rot_dist_reward = -rotation_distance(box_quat, target_quat) / np.pi * 100
+
+        reward += box_goal_pos_dist_reward + box_goal_rot_dist_reward + self._get_end_vel_penalty()
+
+        return reward
+
+    def _get_end_vel_penalty(self):
+        rot_coeff = 150.
+        pos_coeff = 150.
+        box_rot_pos_vel = self._get_box_vel()
+        box_rot_vel = box_rot_pos_vel[:3]
+        box_pos_vel = box_rot_pos_vel[3:]
+        return -rot_coeff * np.linalg.norm(box_rot_vel) - pos_coeff * np.linalg.norm(box_pos_vel)
diff --git a/fancy_gym/envs/mujoco/box_pushing/mp_wrapper.py b/fancy_gym/envs/mujoco/box_pushing/mp_wrapper.py
index 9d775b8..06bb7dc 100644
--- a/fancy_gym/envs/mujoco/box_pushing/mp_wrapper.py
+++ b/fancy_gym/envs/mujoco/box_pushing/mp_wrapper.py
@@ -31,6 +31,17 @@ class MPWrapper(RawInterfaceWrapper):
     # Random x goal + random init pos
     @property
     def context_mask(self):
+        if self.random_init:
+            return np.hstack([
+                [True] * 7,  # joints position
+                [False] * 7,  # joints velocity
+                [True] * 3,  # position of box
+                [True] * 4,  # orientation of box
+                [True] * 3,  # position of target
+                [True] * 4,  # orientation of target
+                # [True] * 1,  # time
+            ])
+
         return np.hstack([
             [False] * 7,  # joints position
             [False] * 7,  # joints velocity
diff --git a/fancy_gym/envs/mujoco/reacher/reacher.py b/fancy_gym/envs/mujoco/reacher/reacher.py
index 8f5f893..f5af7f6 100644
--- a/fancy_gym/envs/mujoco/reacher/reacher.py
+++ b/fancy_gym/envs/mujoco/reacher/reacher.py
@@ -101,11 +101,11 @@ class ReacherEnv(MujocoEnv, utils.EzPickle):
             # I Quadrant
             # self.goal = self.np_random.uniform(low=0, high=self.n_links / 10, size=2)
             # II Quadrant
-            # self.goal = np.random.uniform(low=[-self.n_links / 10, 0], high=[0, self.n_links / 10], size=2)
+            # self.goal = self.np_random.uniform(low=[-self.n_links / 10, 0], high=[0, self.n_links / 10], size=2)
             # II + III Quadrant
-            # self.goal = np.random.uniform(low=-self.n_links / 10, high=[0, self.n_links / 10], size=2)
+            # self.goal = self.np_random.uniform(low=-self.n_links / 10, high=[0, self.n_links / 10], size=2)
             # I + II Quadrant
-            # self.goal = np.random.uniform(low=[-self.n_links / 10, 0], high=self.n_links, size=2)
+            # self.goal = self.np_random.uniform(low=[-self.n_links / 10, 0], high=self.n_links, size=2)
             if np.linalg.norm(self.goal) < self.n_links / 10:
                 break
diff --git a/fancy_gym/envs/mujoco/table_tennis/mp_wrapper.py b/fancy_gym/envs/mujoco/table_tennis/mp_wrapper.py
index 3e5a464..fcc31a8 100644
--- a/fancy_gym/envs/mujoco/table_tennis/mp_wrapper.py
+++ b/fancy_gym/envs/mujoco/table_tennis/mp_wrapper.py
@@ -76,15 +76,16 @@ class TT_MPWrapper(RawInterfaceWrapper):
     def current_vel(self) -> Union[float, int, np.ndarray, Tuple]:
         return self.data.qvel[:7].copy()
 
-    def preprocessing_and_validity_callback(self, action, pos_traj, vel_traj):
-        return self.check_traj_validity(action, pos_traj, vel_traj)
+    def preprocessing_and_validity_callback(self, action: np.ndarray, pos_traj: np.ndarray, vel_traj: np.ndarray,
+                                            tau_bound: list, delay_bound: list):
+        return self.check_traj_validity(action, pos_traj, vel_traj, tau_bound, delay_bound)
 
     def set_episode_arguments(self, action, pos_traj, vel_traj):
         return pos_traj, vel_traj
 
     def invalid_traj_callback(self, action: np.ndarray, pos_traj: np.ndarray, vel_traj: np.ndarray,
-                              return_contextual_obs: bool) -> Tuple[np.ndarray, float, bool, dict]:
-        return self.get_invalid_traj_step_return(action, pos_traj, return_contextual_obs)
+                              return_contextual_obs: bool, tau_bound: list, delay_bound: list) -> Tuple[np.ndarray, float, bool, dict]:
+        return self.get_invalid_traj_step_return(action, pos_traj, return_contextual_obs, tau_bound, delay_bound)
 
 
 class TT_MPWrapper_Replan(TT_MPWrapper):
diff --git a/fancy_gym/envs/mujoco/table_tennis/table_tennis_env.py b/fancy_gym/envs/mujoco/table_tennis/table_tennis_env.py
index 55aa77c..5395de7 100644
--- a/fancy_gym/envs/mujoco/table_tennis/table_tennis_env.py
+++ b/fancy_gym/envs/mujoco/table_tennis/table_tennis_env.py
@@ -5,7 +5,7 @@ from gymnasium import utils, spaces
 from gymnasium.envs.mujoco import MujocoEnv
 
 from fancy_gym.envs.mujoco.table_tennis.table_tennis_utils import is_init_state_valid, magnus_force
-from fancy_gym.envs.mujoco.table_tennis.table_tennis_utils import jnt_pos_low, jnt_pos_high, delay_bound, tau_bound
+from fancy_gym.envs.mujoco.table_tennis.table_tennis_utils import jnt_pos_low, jnt_pos_high
 
 import mujoco
 
@@ -244,7 +244,7 @@ class TableTennisEnv(MujocoEnv, utils.EzPickle):
             init_ball_state = self._generate_random_ball(random_pos=random_pos, random_vel=random_vel)
         return init_ball_state
 
-    def _get_traj_invalid_penalty(self, action, pos_traj):
+    def _get_traj_invalid_penalty(self, action, pos_traj, tau_bound, delay_bound):
         tau_invalid_penalty = 3 * (np.max([0, action[0] - tau_bound[1]]) + np.max([0, tau_bound[0] - action[0]]))
         delay_invalid_penalty = 3 * (np.max([0, action[1] - delay_bound[1]]) + np.max([0, delay_bound[0] - action[1]]))
         violate_high_bound_error = np.mean(np.maximum(pos_traj - jnt_pos_high, 0))
@@ -253,9 +253,9 @@ class TableTennisEnv(MujocoEnv, utils.EzPickle):
                           violate_high_bound_error + violate_low_bound_error
         return -invalid_penalty
 
-    def get_invalid_traj_step_return(self, action, pos_traj, contextual_obs):
+    def get_invalid_traj_step_return(self, action, pos_traj, contextual_obs, tau_bound, delay_bound):
         obs = self._get_obs() if contextual_obs else np.concatenate([self._get_obs(), np.array([0])])  # 0 for invalid traj
-        penalty = self._get_traj_invalid_penalty(action, pos_traj)
+        penalty = self._get_traj_invalid_penalty(action, pos_traj, tau_bound, delay_bound)
         return obs, penalty, True, False, {
             "hit_ball": [False],
             "ball_returned_success": [False],
@@ -266,7 +266,7 @@ class TableTennisEnv(MujocoEnv, utils.EzPickle):
         }
 
     @staticmethod
-    def check_traj_validity(action, pos_traj, vel_traj):
+    def check_traj_validity(action, pos_traj, vel_traj, tau_bound, delay_bound):
         time_invalid = action[0] > tau_bound[1] or action[0] < tau_bound[0] \
                        or action[1] > delay_bound[1] or action[1] < delay_bound[0]
         if time_invalid or np.any(pos_traj > jnt_pos_high) or np.any(pos_traj < jnt_pos_low):
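As a short, hedged usage sketch (not part of the patch): the new constructor argument and the explicit bound arguments can be exercised directly. The bound values and the dummy trajectories below are illustrative placeholders, not the constants previously imported from table_tennis_utils; reset() is assumed to follow the gymnasium (obs, info) convention used by these environments.

    import numpy as np
    from fancy_gym.envs.mujoco.box_pushing.box_pushing_env import BoxPushingDense
    from fancy_gym.envs.mujoco.table_tennis.table_tennis_env import TableTennisEnv

    # Randomised box start pose, enabled through the new constructor argument.
    env = BoxPushingDense(random_init=True)
    obs, info = env.reset()

    # Bounds are now passed explicitly instead of being read from module-level constants.
    action = np.array([1.2, 0.1])   # [tau, delay], hypothetical values
    pos_traj = np.zeros((10, 7))    # dummy joint-position trajectory
    vel_traj = np.zeros((10, 7))
    valid, pos_traj, vel_traj = TableTennisEnv.check_traj_validity(
        action, pos_traj, vel_traj, tau_bound=[0.5, 1.5], delay_bound=[0.05, 0.15])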