fancy_gym/alr_envs/alr/mujoco/reacher/alr_reacher.py

import os

import numpy as np
from gym import utils
from gym.envs.mujoco import MujocoEnv

import alr_envs.utils.utils as alr_utils


class ALRReacherEnv(MujocoEnv, utils.EzPickle):
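    """Multi-link planar reacher (5 or 7 joints) based on the gym MujocoEnv.

    The distance to the target only enters the reward once ``steps_before_reward``
    steps have passed (together with an angular-velocity penalty); a control cost
    is applied at every step. With ``balance=True`` an additional penalty on the
    normalized sum of the joint angles is added.
    """
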
    def __init__(self, steps_before_reward=200, n_links=5, balance=False):
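        # EzPickle records the constructor arguments (**locals() forwards them,
        # including self) so the environment can be re-created when unpickled.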
        utils.EzPickle.__init__(**locals())

        self._steps = 0
        self.steps_before_reward = steps_before_reward
        self.n_links = n_links

        self.balance = balance
        self.balance_weight = 1.0

        self.reward_weight = 1
        if steps_before_reward == 200:
            self.reward_weight = 200
        elif steps_before_reward == 50:
            self.reward_weight = 50

        if n_links == 5:
            file_name = 'reacher_5links.xml'
        elif n_links == 7:
            file_name = 'reacher_7links.xml'
        else:
            raise ValueError(f"Invalid number of links {n_links}, only 5 or 7 allowed.")

        MujocoEnv.__init__(self, os.path.join(os.path.dirname(__file__), "assets", file_name), 2)

    def step(self, a):
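        """Simulate one control step with action ``a``.

        Returns ``(obs, reward, done, info)``; note that the reward terms are
        computed from the state *before* ``do_simulation`` is called.
        """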
        self._steps += 1

        reward_dist = 0.0
        angular_vel = 0.0
        reward_balance = 0.0
        if self._steps >= self.steps_before_reward:
            vec = self.get_body_com("fingertip") - self.get_body_com("target")
            reward_dist -= self.reward_weight * np.linalg.norm(vec)

            if self.steps_before_reward > 0:
                # avoid giving this penalty for the normal step-based case
                # angular_vel -= 10 * np.linalg.norm(self.sim.data.qvel.flat[:self.n_links])
                angular_vel -= 10 * np.square(self.sim.data.qvel.flat[:self.n_links]).sum()
        reward_ctrl = - 10 * np.square(a).sum()

        if self.balance:
            reward_balance -= self.balance_weight * np.abs(
                alr_utils.angle_normalize(np.sum(self.sim.data.qpos.flat[:self.n_links]), type="rad"))

        reward = reward_dist + reward_ctrl + angular_vel + reward_balance
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        done = False

        return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl,
                                      velocity=angular_vel, reward_balance=reward_balance,
                                      end_effector=self.get_body_com("fingertip").copy(),
                                      goal=self.goal if hasattr(self, "goal") else None)

    def viewer_setup(self):
        self.viewer.cam.trackbodyid = 0

    # def reset_model(self):
    #     qpos = self.init_qpos
    #     if not hasattr(self, "goal"):
    #         self.goal = np.array([-0.25, 0.25])
    #         # self.goal = self.init_qpos.copy()[:2] + 0.05
    #     qpos[-2:] = self.goal
    #     qvel = self.init_qvel
    #     qvel[-2:] = 0
    #     self.set_state(qpos, qvel)
    #     self._steps = 0
    #
    #     return self._get_obs()

    def reset_model(self):
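        """Reset the arm to its initial pose and sample a new goal within reach of the arm."""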
        qpos = self.init_qpos.copy()
        while True:
            self.goal = self.np_random.uniform(low=-self.n_links / 10, high=self.n_links / 10, size=2)
            # self.goal = self.np_random.uniform(low=0, high=self.n_links / 10, size=2)
            # self.goal = np.random.uniform(low=[-self.n_links / 10, 0], high=[0, self.n_links / 10], size=2)
            if np.linalg.norm(self.goal) < self.n_links / 10:
                break
        qpos[-2:] = self.goal
        qvel = self.init_qvel.copy()
        qvel[-2:] = 0
        self.set_state(qpos, qvel)
        self._steps = 0

        return self._get_obs()

    # def reset_model(self):
    #     qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
    #     while True:
    #         self.goal = self.np_random.uniform(low=-self.n_links / 10, high=self.n_links / 10, size=2)
    #         if np.linalg.norm(self.goal) < self.n_links / 10:
    #             break
    #     qpos[-2:] = self.goal
    #     qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
    #     qvel[-2:] = 0
    #     self.set_state(qpos, qvel)
    #     self._steps = 0
    #
    #     return self._get_obs()

    def _get_obs(self):
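        """Observation: cos/sin of the joint angles, goal x-y position, joint
        velocities, vector from target to fingertip, and the current step count."""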
        theta = self.sim.data.qpos.flat[:self.n_links]
        target = self.get_body_com("target")
        return np.concatenate([
            np.cos(theta),
            np.sin(theta),
            target[:2],  # x-y of goal position
            self.sim.data.qvel.flat[:self.n_links],  # angular velocity
            self.get_body_com("fingertip") - target,  # goal distance
            [self._steps],
        ])


if __name__ == '__main__':
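    # Simple manual test: run the 5-link reacher with random actions and render.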
    nl = 5
    render_mode = "human"  # "human" or "partial" or "final"
    env = ALRReacherEnv(n_links=nl)
    obs = env.reset()
    for i in range(2000):
        # objective.load_result("/tmp/cma")
        # test with random actions
        ac = env.action_space.sample()
        obs, rew, d, info = env.step(ac)
        if i % 10 == 0:
            env.render(mode=render_mode)
        if d:
            env.reset()

    env.close()