diff --git a/alr_envs/alr/mujoco/reacher/alr_reacher.py b/alr_envs/alr/mujoco/reacher/alr_reacher.py
index 4477a66..b436fdd 100644
--- a/alr_envs/alr/mujoco/reacher/alr_reacher.py
+++ b/alr_envs/alr/mujoco/reacher/alr_reacher.py
@@ -39,11 +39,18 @@ class ALRReacherEnv(MujocoEnv, utils.EzPickle):
         reward_dist = 0.0
         angular_vel = 0.0
         reward_balance = 0.0
+        is_delayed = self.steps_before_reward > 0
+        reward_ctrl = - np.square(a).sum()
         if self._steps >= self.steps_before_reward:
             vec = self.get_body_com("fingertip") - self.get_body_com("target")
             reward_dist -= self.reward_weight * np.linalg.norm(vec)
-            angular_vel -= np.linalg.norm(self.sim.data.qvel.flat[:self.n_links])
-        reward_ctrl = - np.square(a).sum()
+            if is_delayed:
+                # avoid giving this penalty for normal step based case
+                # angular_vel -= 10 * np.linalg.norm(self.sim.data.qvel.flat[:self.n_links])
+                angular_vel -= 10 * np.square(self.sim.data.qvel.flat[:self.n_links]).sum()
+        if is_delayed:
+            # Higher control penalty for sparse reward per timestep
+            reward_ctrl *= 10
 
         if self.balance:
             reward_balance -= self.balance_weight * np.abs(
@@ -56,63 +63,66 @@ class ALRReacherEnv(MujocoEnv, utils.EzPickle):
         return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl,
                                       velocity=angular_vel, reward_balance=reward_balance,
                                       end_effector=self.get_body_com("fingertip").copy(),
-                                      goal=self.goal if hasattr(self, "goal") else None,
-                                      joint_pos = self.sim.data.qpos.flat[:self.n_links].copy(),
-                                      joint_vel = self.sim.data.qvel.flat[:self.n_links].copy())
+                                      goal=self.goal if hasattr(self, "goal") else None)
 
     def viewer_setup(self):
         self.viewer.cam.trackbodyid = 0
 
+    # def reset_model(self):
+    #     qpos = self.init_qpos
+    #     if not hasattr(self, "goal"):
+    #         self.goal = np.array([-0.25, 0.25])
+    #         # self.goal = self.init_qpos.copy()[:2] + 0.05
+    #     qpos[-2:] = self.goal
+    #     qvel = self.init_qvel
+    #     qvel[-2:] = 0
+    #     self.set_state(qpos, qvel)
+    #     self._steps = 0
+    #
+    #     return self._get_obs()
+
     def reset_model(self):
-        # qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
-        qpos = self.init_qpos
+        qpos = self.init_qpos.copy()
         while True:
             self.goal = self.np_random.uniform(low=-self.n_links / 10, high=self.n_links / 10, size=2)
+            # self.goal = self.np_random.uniform(low=0, high=self.n_links / 10, size=2)
+            # self.goal = np.random.uniform(low=[-self.n_links / 10, 0], high=[0, self.n_links / 10], size=2)
             if np.linalg.norm(self.goal) < self.n_links / 10:
                 break
         qpos[-2:] = self.goal
-        qvel = self.init_qvel# + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
+        qvel = self.init_qvel.copy()
         qvel[-2:] = 0
         self.set_state(qpos, qvel)
         self._steps = 0
 
         return self._get_obs()
 
+    # def reset_model(self):
+    #     qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
+    #     while True:
+    #         self.goal = self.np_random.uniform(low=-self.n_links / 10, high=self.n_links / 10, size=2)
+    #         if np.linalg.norm(self.goal) < self.n_links / 10:
+    #             break
+    #     qpos[-2:] = self.goal
+    #     qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
+    #     qvel[-2:] = 0
+    #     self.set_state(qpos, qvel)
+    #     self._steps = 0
+    #
+    #     return self._get_obs()
+
     def _get_obs(self):
         theta = self.sim.data.qpos.flat[:self.n_links]
+        target = self.get_body_com("target")
         return np.concatenate([
             np.cos(theta),
             np.sin(theta),
-            self.sim.data.qpos.flat[self.n_links:],  # this is goal position
-            self.sim.data.qvel.flat[:self.n_links],  # this is angular velocity
-            self.get_body_com("fingertip") - self.get_body_com("target"),
-            # self.get_body_com("target"),  # only return target to make problem harder
+            target[:2],  # x-y of goal position
+            self.sim.data.qvel.flat[:self.n_links],  # angular velocity
+            self.get_body_com("fingertip") - target,  # goal distance
             [self._steps],
         ])
 
-class ALRReacherOptCtrlEnv(ALRReacherEnv):
-    def __init__(self, steps_before_reward=200, n_links=5, balance=False):
-        self.goal = np.array([0.1, 0.1])
-        super(ALRReacherOptCtrlEnv, self).__init__(steps_before_reward, n_links, balance)
-
-    def _get_obs(self):
-        theta = self.sim.data.qpos.flat[:self.n_links]
-        tip_pos = self.get_body_com("fingertip")
-        return np.concatenate([
-            tip_pos[:2],
-            theta,
-            self.sim.data.qvel.flat[:self.n_links],  # this is angular velocity
-        ])
-
-    def reset_model(self):
-        qpos = self.init_qpos
-        qpos[-2:] = self.goal
-        qvel = self.init_qvel
-        qvel[-2:] = 0
-        self.set_state(qpos, qvel)
-        self._steps = 0
-
-        return self._get_obs()
 
 if __name__ == '__main__':
     nl = 5
@@ -130,4 +140,4 @@ if __name__ == '__main__':
         if d:
             env.reset()
 
-    env.close()
\ No newline at end of file
+    env.close()