from typing import Tuple, Union
import numpy as np
from alr_envs.mp.raw_interface_wrapper import RawInterfaceWrapper


class MPWrapper(RawInterfaceWrapper):
    """Movement-primitive interface for a 2-DoF reaching-style dm_control task."""

    @property
    def context_mask(self) -> np.ndarray:
        """Boolean mask selecting the context-relevant observation entries.

        Joint and target positions are randomized by the environment while
        velocities always start at 0, so only the position entries carry
        context information.
        """
        return np.array(
            [True] * 2      # joint position
            + [True] * 2    # target position
            + [False] * 2   # joint velocity
        )

    @property
    def current_pos(self) -> Union[float, int, np.ndarray]:
        """Current joint positions read from the simulation state."""
        sim_data = self.env.physics.named.data
        return sim_data.qpos[:]

    @property
    def current_vel(self) -> Union[float, int, np.ndarray, Tuple]:
        """Current joint velocities read from the simulation state."""
        sim_data = self.env.physics.named.data
        return sim_data.qvel[:]

    @property
    def goal_pos(self) -> Union[float, int, np.ndarray, Tuple]:
        """Unavailable for this task; the goal must be learnt from interaction.

        Raises:
            ValueError: always — this environment does not expose a goal.
        """
        raise ValueError("Goal position is not available and has to be learnt based on the environment.")

    @property
    def dt(self) -> Union[float, int]:
        """Control timestep of the wrapped environment."""
        return self.env.dt