fancy_gym/alr_envs/alr/mujoco/ant_jump/mp_wrapper.py

from typing import Tuple, Union

import numpy as np

from alr_envs.black_box.raw_interface_wrapper import RawInterfaceWrapper


class MPWrapper(RawInterfaceWrapper):

    @property
    def context_mask(self) -> np.ndarray:
        # Only the goal height is exposed as context; the ant's own observation
        # (111 dimensions) is masked out.
        return np.hstack([
            [False] * 111,  # ant has a 111-dimensional observation space
            [True]  # goal height
        ])

    @property
    def current_pos(self) -> Union[float, int, np.ndarray]:
        # Positions of the 8 actuated leg joints; qpos[0:7] is the free-joint root pose.
        return self.env.sim.data.qpos[7:15].copy()

    @property
    def current_vel(self) -> Union[float, int, np.ndarray, Tuple]:
        # Velocities of the 8 actuated leg joints; qvel[0:6] is the free-joint root velocity.
        return self.env.sim.data.qvel[6:14].copy()

    @property
    def goal_pos(self) -> Union[float, int, np.ndarray, Tuple]:
        raise ValueError("Goal position is not available and has to be learned from the environment.")

    @property
    def dt(self) -> Union[float, int]:
        return self.env.dt
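

# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the wrapper itself): how a boolean context
# mask like the one above can be applied to a full observation to extract the
# goal height. The 112-entry dummy observation is a hypothetical placeholder,
# not an actual AntJump rollout.
if __name__ == "__main__":
    mask = np.hstack([[False] * 111, [True]])
    dummy_observation = np.arange(112, dtype=np.float64)
    goal_height_context = dummy_observation[mask]  # single goal-height entry
    print(goal_height_context)  # -> [111.]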