fancy_gym/alr_envs/alr/classic_control/hole_reacher/mp_wrapper.py

from typing import Tuple, Union

import numpy as np

from mp_env_api import MPEnvWrapper


class MPWrapper(MPEnvWrapper):

    @property
    def active_obs(self):
        return np.hstack([
            [self.env.random_start] * self.env.n_links,  # cos
            [self.env.random_start] * self.env.n_links,  # sin
            [self.env.random_start] * self.env.n_links,  # velocity
            [self.env.initial_width is None],  # hole width
            # [self.env.hole_depth is None],  # hole depth
            [True] * 2,  # x-y coordinates of target distance
            [False]  # env steps
        ])
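
    # Presumably, each boolean above marks whether the corresponding entry of the
    # flat observation is exposed as "active" context to the movement-primitive
    # policy: entries that are constant for a fixed task (joint state when
    # random_start is False, the hole width when it is not randomized, the step
    # counter) are masked out. This reading is inferred from how mp_env_api uses
    # active_obs and may not match the upstream documentation exactly.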
    # @property
    # def current_pos(self) -> Union[float, int, np.ndarray, Tuple]:
    #     return self._joint_angles.copy()
    #
    # @property
    # def current_vel(self) -> Union[float, int, np.ndarray, Tuple]:
    #     return self._angle_velocity.copy()

    @property
    def current_pos(self) -> Union[float, int, np.ndarray, Tuple]:
        return self.env.current_pos

    @property
    def current_vel(self) -> Union[float, int, np.ndarray, Tuple]:
        return self.env.current_vel

    @property
    def goal_pos(self) -> Union[float, int, np.ndarray, Tuple]:
        raise ValueError("Goal position is not available and has to be learnt based on the environment.")

    @property
    def dt(self) -> Union[float, int]:
        return self.env.dt
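
# ---------------------------------------------------------------------------
# Usage sketch (illustrative only): how this wrapper is typically applied to
# the hole-reacher environment. The import path and the HoleReacherEnv
# constructor arguments are assumptions based on the surrounding package
# layout and may differ in this revision of the repository.
#
#   from alr_envs.alr.classic_control.hole_reacher.hole_reacher import HoleReacherEnv
#
#   base_env = HoleReacherEnv(n_links=5)   # assumed constructor signature
#   env = MPWrapper(base_env)              # MPEnvWrapper wraps a gym-style env
#
#   obs = env.reset()
#   mask = env.active_obs                  # boolean mask over the flat observation
#   context = obs[mask]                    # observation entries kept as MP context
#   print(context, env.current_pos, env.current_vel, env.dt)
# ---------------------------------------------------------------------------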