Adopt new interface structure

This commit is contained in:
Marcel 2021-07-05 09:52:41 +02:00
parent 0046ade102
commit 92e6a84d03
10 changed files with 7 additions and 30 deletions

View File

@@ -3,7 +3,8 @@
This repository collects custom Robotics environments not included in benchmark suites like OpenAI gym, rllab, etc.
Creating a custom (Mujoco) gym environment can be done according to [this guide](https://github.com/openai/gym/blob/master/docs/creating-environments.md).
For stochastic search problems with gym interface use the `Rosenbrock-v0` reference implementation.
We also support to solve environments with DMPs. When adding new DMP tasks check the `ViaPointReacherDMP-v0` reference implementation.
We also support solving environments with Dynamic Movement Primitives (DMPs) and Probabilistic Movement Primitives (DetPMP; we usually only consider the mean).
When adding new DMP tasks check the `ViaPointReacherDMP-v0` reference implementation.
When simply using the tasks, you can also leverage the wrapper class `DmpWrapper` to turn normal gym environments into DMP tasks.
## Environments

View File

@@ -1,7 +1,7 @@
from typing import Union
import numpy as np
from mp_env_api.env_wrappers.mp_env_wrapper import MPEnvWrapper
from mp_env_api.interface_wrappers.mp_env_wrapper import MPEnvWrapper
class MPWrapper(MPEnvWrapper):

View File

@@ -1,7 +1,7 @@
from typing import Union
import numpy as np
from mp_env_api.env_wrappers.mp_env_wrapper import MPEnvWrapper
from mp_env_api.interface_wrappers.mp_env_wrapper import MPEnvWrapper
class MPWrapper(MPEnvWrapper):

View File

@@ -1,7 +1,7 @@
from typing import Union
import numpy as np
from mp_env_api.env_wrappers.mp_env_wrapper import MPEnvWrapper
from mp_env_api.interface_wrappers.mp_env_wrapper import MPEnvWrapper
class MPWrapper(MPEnvWrapper):
@@ -14,10 +14,6 @@ class MPWrapper(MPEnvWrapper):
def current_pos(self) -> Union[float, int, np.ndarray]:
return self.sim.data.qpos[:2]
@property
def goal_pos(self):
return self.goal
@property
def dt(self) -> Union[float, int]:
return self.env.dt

View File

@@ -1,10 +0,0 @@
Metadata-Version: 1.0
Name: reacher
Version: 0.0.1
Summary: UNKNOWN
Home-page: UNKNOWN
Author: UNKNOWN
Author-email: UNKNOWN
License: UNKNOWN
Description: UNKNOWN
Platform: UNKNOWN

View File

@@ -1,7 +0,0 @@
README.md
setup.py
reacher.egg-info/PKG-INFO
reacher.egg-info/SOURCES.txt
reacher.egg-info/dependency_links.txt
reacher.egg-info/requires.txt
reacher.egg-info/top_level.txt

View File

@@ -1 +0,0 @@

View File

@@ -1 +0,0 @@
gym

View File

@@ -1 +0,0 @@

View File

@@ -3,14 +3,14 @@ from setuptools import setup
setup(
name='alr_envs',
version='0.0.1',
packages=['alr_envs', 'alr_envs.classic_control', 'alr_envs.mujoco', 'alr_envs.stochastic_search',
packages=['alr_envs', 'alr_envs.classic_control', 'alr_envs.open_ai', 'alr_envs.mujoco', 'alr_envs.stochastic_search',
'alr_envs.utils'],
install_requires=[
'gym',
'PyQt5',
'matplotlib',
'mp_env_api @ git+ssh://git@github.com/ALRhub/motion_primitive_env_api.git',
'mujoco_py',
'mujoco-py<2.1,>=2.0',
'dm_control'
],