fancy_gym/reacher/envs/reacher_env.py

import os

import numpy as np
from gym import utils
from gym.envs.mujoco import mujoco_env


class ReacherALREnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """5-link planar reacher: drive the fingertip to a randomly placed target."""

    def __init__(self):
        utils.EzPickle.__init__(self)
        # Resolve the model XML relative to this module rather than the
        # original hard-coded absolute path ('/home/vien/git/reacher_test/...');
        # this assumes reacher_5links.xml lives next to this file.
        xml_path = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                'reacher_5links.xml')
        mujoco_env.MujocoEnv.__init__(self, xml_path, 2)

    def step(self, a):
        # Reward is the negative fingertip-to-target distance plus a quadratic
        # control penalty, computed before stepping the simulation (as in the
        # standard gym Reacher).
        vec = self.get_body_com("fingertip") - self.get_body_com("target")
        reward_dist = -np.linalg.norm(vec)
        reward_ctrl = -np.square(a).sum()
        reward = reward_dist + reward_ctrl
        self.do_simulation(a, self.frame_skip)
        ob = self._get_obs()
        done = False
        return ob, reward, done, dict(reward_dist=reward_dist, reward_ctrl=reward_ctrl)

    def viewer_setup(self):
        self.viewer.cam.trackbodyid = 0

    def reset_model(self):
        # Slightly perturb the initial joint configuration.
        qpos = self.np_random.uniform(low=-0.1, high=0.1, size=self.model.nq) + self.init_qpos
        # Rejection-sample a goal inside a disc of radius 0.2 around the base.
        while True:
            self.goal = self.np_random.uniform(low=-.2, high=.2, size=2)
            if np.linalg.norm(self.goal) < 0.2:
                break
        qpos[-2:] = self.goal
        qvel = self.init_qvel + self.np_random.uniform(low=-.005, high=.005, size=self.model.nv)
        qvel[-2:] = 0  # the target itself does not move
        self.set_state(qpos, qvel)
        return self._get_obs()

    def _get_obs(self):
        theta = self.sim.data.qpos.flat[:5]
        return np.concatenate([
            np.cos(theta),
            np.sin(theta),
            self.sim.data.qpos.flat[5:],   # goal (target) position
            self.sim.data.qvel.flat[:5],   # joint angular velocities
            self.get_body_com("fingertip") - self.get_body_com("target"),
        ])
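

# Minimal usage sketch (not part of the original module). It assumes a
# gym + mujoco-py install matching the imports above and that the XML model
# resolves as described in __init__. It instantiates the environment
# directly and runs one episode of random actions; note that `done` is
# always False here, so the loop simply runs for a fixed number of steps.
if __name__ == "__main__":
    env = ReacherALREnv()
    ob = env.reset()
    for _ in range(200):
        action = env.action_space.sample()
        ob, reward, done, info = env.step(action)
        if done:
            break
    env.close()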