reacher adjustments

commit ad30e732c8
parent 640f3b2d90
@@ -39,11 +39,18 @@ class ALRReacherEnv(MujocoEnv, utils.EzPickle):
         reward_dist = 0.0
         angular_vel = 0.0
         reward_balance = 0.0
+        is_delayed = self.steps_before_reward > 0
+        reward_ctrl = - np.square(a).sum()
         if self._steps >= self.steps_before_reward:
             vec = self.get_body_com("fingertip") - self.get_body_com("target")
             reward_dist -= self.reward_weight * np.linalg.norm(vec)
-            angular_vel -= np.linalg.norm(self.sim.data.qvel.flat[:self.n_links])
-        reward_ctrl = - np.square(a).sum()
+            if is_delayed:
+                # avoid giving this penalty for normal step based case
+                # angular_vel -= 10 * np.linalg.norm(self.sim.data.qvel.flat[:self.n_links])
+                angular_vel -= 10 * np.square(self.sim.data.qvel.flat[:self.n_links]).sum()
+        if is_delayed:
+            # Higher control penalty for sparse reward per timestep
+            reward_ctrl *= 10
 
         if self.balance:
             reward_balance -= self.balance_weight * np.abs(
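Note: the hunk above only accumulates the individual reward terms; the line that sums them into the scalar reward lies outside the hunk. A minimal sketch of the resulting shaping, assuming the usual combination reward = reward_dist + reward_ctrl + angular_vel + reward_balance (the standalone function and its explicit arguments are illustrative, not part of the diff):

import numpy as np

def step_reward(a, dist_vec, qvel, steps, steps_before_reward,
                reward_weight=1.0, balance=False, balance_weight=0.0, balance_angle=0.0):
    # Sketch only: the final summation is an assumption; the diff shows just the terms.
    is_delayed = steps_before_reward > 0               # sparse / delayed-reward mode
    reward_ctrl = -np.square(a).sum()                  # torque penalty, every step
    reward_dist = angular_vel = reward_balance = 0.0
    if steps >= steps_before_reward:
        reward_dist -= reward_weight * np.linalg.norm(dist_vec)
        if is_delayed:
            # velocity penalty only in the delayed case, not for the normal step-based reward
            angular_vel -= 10 * np.square(qvel).sum()
    if is_delayed:
        reward_ctrl *= 10                              # higher control penalty per timestep
    if balance:
        reward_balance -= balance_weight * abs(balance_angle)
    return reward_dist + reward_ctrl + angular_vel + reward_balance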
@@ -56,63 +63,66 @@ class ALRReacherEnv(MujocoEnv, utils.EzPickle):
         return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl,
                                       velocity=angular_vel, reward_balance=reward_balance,
                                       end_effector=self.get_body_com("fingertip").copy(),
-                                      goal=self.goal if hasattr(self, "goal") else None,
-                                      joint_pos = self.sim.data.qpos.flat[:self.n_links].copy(),
-                                      joint_vel = self.sim.data.qvel.flat[:self.n_links].copy())
+                                      goal=self.goal if hasattr(self, "goal") else None)
 
     def viewer_setup(self):
         self.viewer.cam.trackbodyid = 0
 
+    # def reset_model(self):
+    #     qpos = self.init_qpos
+    #     if not hasattr(self, "goal"):
+    #         self.goal = np.array([-0.25, 0.25])
+    #         # self.goal = self.init_qpos.copy()[:2] + 0.05
+    #     qpos[-2:] = self.goal
+    #     qvel = self.init_qvel
+    #     qvel[-2:] = 0
+    #     self.set_state(qpos, qvel)
+    #     self._steps = 0
+    #
+    #     return self._get_obs()
 
     def reset_model(self):
-        # qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
-        qpos = self.init_qpos
+        qpos = self.init_qpos.copy()
         while True:
             self.goal = self.np_random.uniform(low=-self.n_links / 10, high=self.n_links / 10, size=2)
+            # self.goal = self.np_random.uniform(low=0, high=self.n_links / 10, size=2)
+            # self.goal = np.random.uniform(low=[-self.n_links / 10, 0], high=[0, self.n_links / 10], size=2)
            if np.linalg.norm(self.goal) < self.n_links / 10:
                break
        qpos[-2:] = self.goal
-        qvel = self.init_qvel# + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
+        qvel = self.init_qvel.copy()
        qvel[-2:] = 0
        self.set_state(qpos, qvel)
        self._steps = 0

        return self._get_obs()
 
+    # def reset_model(self):
+    #     qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
+    #     while True:
+    #         self.goal = self.np_random.uniform(low=-self.n_links / 10, high=self.n_links / 10, size=2)
+    #         if np.linalg.norm(self.goal) < self.n_links / 10:
+    #             break
+    #     qpos[-2:] = self.goal
+    #     qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
+    #     qvel[-2:] = 0
+    #     self.set_state(qpos, qvel)
+    #     self._steps = 0
+    #
+    #     return self._get_obs()
 
     def _get_obs(self):
         theta = self.sim.data.qpos.flat[:self.n_links]
+        target = self.get_body_com("target")
         return np.concatenate([
             np.cos(theta),
             np.sin(theta),
-            self.sim.data.qpos.flat[self.n_links:],  # this is goal position
-            self.sim.data.qvel.flat[:self.n_links],  # this is angular velocity
-            self.get_body_com("fingertip") - self.get_body_com("target"),
-            # self.get_body_com("target"), # only return target to make problem harder
+            target[:2],  # x-y of goal position
+            self.sim.data.qvel.flat[:self.n_links],  # angular velocity
+            self.get_body_com("fingertip") - target,  # goal distance
             [self._steps],
         ])
 
 
-class ALRReacherOptCtrlEnv(ALRReacherEnv):
-    def __init__(self, steps_before_reward=200, n_links=5, balance=False):
-        self.goal = np.array([0.1, 0.1])
-        super(ALRReacherOptCtrlEnv, self).__init__(steps_before_reward, n_links, balance)
-
-    def _get_obs(self):
-        theta = self.sim.data.qpos.flat[:self.n_links]
-        tip_pos = self.get_body_com("fingertip")
-        return np.concatenate([
-            tip_pos[:2],
-            theta,
-            self.sim.data.qvel.flat[:self.n_links],  # this is angular velocity
-        ])
-
-    def reset_model(self):
-        qpos = self.init_qpos
-        qpos[-2:] = self.goal
-        qvel = self.init_qvel
-        qvel[-2:] = 0
-        self.set_state(qpos, qvel)
-        self._steps = 0
-
-        return self._get_obs()
-
-
 if __name__ == '__main__':
     nl = 5
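Note: reset_model now copies init_qpos / init_qvel before mutating them and rejection-samples the goal inside a disc of radius n_links / 10, which (assuming each link is 0.1 long) keeps the target within the arm's reach. A self-contained sketch of that sampling step; the function name and use of numpy's default generator are illustrative, not part of the diff:

import numpy as np

def sample_reachable_goal(n_links, rng=None):
    """Rejection-sample a 2-D goal inside a disc of radius n_links / 10.

    Assuming each link is 0.1 long, n_links / 10 is the arm's maximum reach,
    so every accepted goal can actually be touched by the fingertip.
    """
    rng = rng or np.random.default_rng()
    radius = n_links / 10
    while True:
        goal = rng.uniform(low=-radius, high=radius, size=2)  # sample in the bounding square
        if np.linalg.norm(goal) < radius:                     # accept only points inside the disc
            return goal

# e.g. sample_reachable_goal(5) returns a goal with norm < 0.5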
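Note: _get_obs now reports the target's x-y position and the fingertip-to-target difference explicitly instead of the raw trailing qpos entries. For orientation, the resulting layout read off the concatenation above (assuming get_body_com returns 3-D positions, as in MuJoCo):

n_links = 5
# cos(theta): n_links, sin(theta): n_links, target x-y: 2,
# joint velocities: n_links, fingertip - target: 3, step counter: 1
obs_dim = 3 * n_links + 2 + 3 + 1
assert obs_dim == 21  # for the default 5-link reacher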
@@ -130,4 +140,4 @@ if __name__ == '__main__':
         if d:
             env.reset()
 
     env.close()
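Note: only the tail of the __main__ block is visible in this hunk. A minimal rollout sketch under assumptions (gym-style API inherited from MujocoEnv, and a direct construction using the keyword arguments seen in the removed ALRReacherOptCtrlEnv.__init__; the actual __main__ block may build the env differently). The snippet assumes it lives in the same module that defines ALRReacherEnv:

env = ALRReacherEnv(steps_before_reward=200, n_links=5, balance=False)

obs = env.reset()
for _ in range(1000):
    a = env.action_space.sample()    # random joint torques
    obs, r, d, info = env.step(a)    # info carries reward_dist, reward_ctrl, velocity, ...
    if d:
        env.reset()

env.close()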