# fancy_gym/alr_envs/__init__.py
import numpy as np

from gym.envs.registration import register

from alr_envs.classic_control.hole_reacher.hole_reacher_mp_wrapper import HoleReacherMPWrapper
from alr_envs.classic_control.simple_reacher.simple_reacher_mp_wrapper import SimpleReacherMPWrapper
from alr_envs.classic_control.viapoint_reacher.viapoint_reacher_mp_wrapper import ViaPointReacherMPWrapper
from alr_envs.dmc.manipulation.reach.reach_mp_wrapper import DMCReachSiteMPWrapper
from alr_envs.dmc.suite.ball_in_cup.ball_in_cup_mp_wrapper import DMCBallInCupMPWrapper
from alr_envs.dmc.suite.cartpole.cartpole_mp_wrapper import DMCCartpoleMPWrapper, DMCCartpoleThreePolesMPWrapper, \
    DMCCartpoleTwoPolesMPWrapper
from alr_envs.dmc.suite.reacher.reacher_mp_wrapper import DMCReacherMPWrapper
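# Every register() call below makes an environment id available to gym.make(). A minimal
# usage sketch (assuming gym's "module:id" lookup, the same convention this file uses for
# the motion-primitive wrappers further down):
#
#     import gym
#     env = gym.make("alr_envs:ALRReacher-v0")
#     obs = env.reset()
#     obs, reward, done, info = env.step(env.action_space.sample())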
# Mujoco
## Reacher
register(
id='ALRReacher-v0',
entry_point='alr_envs.mujoco:ALRReacherEnv',
max_episode_steps=200,
kwargs={
"steps_before_reward": 0,
"n_links": 5,
"balance": False,
}
)
register(
id='ALRReacherSparse-v0',
entry_point='alr_envs.mujoco:ALRReacherEnv',
max_episode_steps=200,
kwargs={
"steps_before_reward": 200,
"n_links": 5,
"balance": False,
}
)
register(
id='ALRReacherSparseBalanced-v0',
entry_point='alr_envs.mujoco:ALRReacherEnv',
max_episode_steps=200,
kwargs={
"steps_before_reward": 200,
"n_links": 5,
"balance": True,
}
)
register(
id='ALRLongReacher-v0',
entry_point='alr_envs.mujoco:ALRReacherEnv',
max_episode_steps=200,
kwargs={
"steps_before_reward": 0,
"n_links": 7,
"balance": False,
}
)
register(
id='ALRLongReacherSparse-v0',
entry_point='alr_envs.mujoco:ALRReacherEnv',
max_episode_steps=200,
kwargs={
"steps_before_reward": 200,
"n_links": 7,
"balance": False,
}
)
register(
id='ALRLongReacherSparseBalanced-v0',
entry_point='alr_envs.mujoco:ALRReacherEnv',
max_episode_steps=200,
kwargs={
"steps_before_reward": 200,
"n_links": 7,
"balance": True,
}
)
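# The six reacher ids above share one entry point and differ only in their kwargs:
# "n_links" selects the 5- or 7-link arm, "steps_before_reward" switches between dense (0)
# and sparse (200, i.e. reward presumably only towards the end of the 200-step episode),
# and "balance" toggles the additional balancing objective.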
## Balancing Reacher
register(
id='Balancing-v0',
entry_point='alr_envs.mujoco:BalancingEnv',
max_episode_steps=200,
kwargs={
"n_links": 5,
}
)
# Classic control
## Simple Reacher
register(
id='SimpleReacher-v0',
entry_point='alr_envs.classic_control:SimpleReacherEnv',
max_episode_steps=200,
kwargs={
"n_links": 2,
}
)
register(
id='SimpleReacher-v1',
entry_point='alr_envs.classic_control:SimpleReacherEnv',
max_episode_steps=200,
kwargs={
"n_links": 2,
"random_start": False
}
)
register(
id='LongSimpleReacher-v0',
entry_point='alr_envs.classic_control:SimpleReacherEnv',
max_episode_steps=200,
kwargs={
"n_links": 5,
}
)
register(
id='LongSimpleReacher-v1',
entry_point='alr_envs.classic_control:SimpleReacherEnv',
max_episode_steps=200,
kwargs={
"n_links": 5,
"random_start": False
}
)
## Viapoint Reacher
register(
id='ViaPointReacher-v0',
entry_point='alr_envs.classic_control:ViaPointReacher',
max_episode_steps=200,
kwargs={
"n_links": 5,
"allow_self_collision": False,
"collision_penalty": 1000
}
)
## Hole Reacher
register(
id='HoleReacher-v0',
entry_point='alr_envs.classic_control:HoleReacherEnv',
max_episode_steps=200,
kwargs={
"n_links": 5,
"random_start": True,
"allow_self_collision": False,
"allow_wall_collision": False,
"hole_width": None,
"hole_depth": 1,
"hole_x": None,
"collision_penalty": 100,
}
)
register(
id='HoleReacher-v1',
entry_point='alr_envs.classic_control:HoleReacherEnv',
max_episode_steps=200,
kwargs={
"n_links": 5,
"random_start": False,
"allow_self_collision": False,
"allow_wall_collision": False,
"hole_width": 0.25,
"hole_depth": 1,
"hole_x": None,
"collision_penalty": 100,
}
)
register(
id='HoleReacher-v2',
entry_point='alr_envs.classic_control:HoleReacherEnv',
max_episode_steps=200,
kwargs={
"n_links": 5,
"random_start": False,
"allow_self_collision": False,
"allow_wall_collision": False,
"hole_width": 0.25,
"hole_depth": 1,
"hole_x": 2,
"collision_penalty": 100,
}
)
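# The three HoleReacher versions differ only in how much is randomized: v0 samples a random
# start configuration and hole (None presumably means "sample"), v1 fixes the start and hole
# width, and v2 additionally fixes the hole position. A quick inspection sketch:
#
#     import gym
#     env = gym.make("alr_envs:HoleReacher-v2")
#     env.reset()
#     env.render()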
# Motion Primitive Environments
## Simple Reacher
versions = ["SimpleReacher-v0", "SimpleReacher-v1", "LongSimpleReacher-v0", "LongSimpleReacher-v1"]
for v in versions:
    name = v.split("-")
    register(
        id=f'{name[0]}DMP-{name[1]}',
        entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
        # max_episode_steps=1,
        kwargs={
            "name": f"alr_envs:{v}",
            "wrappers": [SimpleReacherMPWrapper],
            "mp_kwargs": {
                "num_dof": 2 if "long" not in v.lower() else 5,
                "num_basis": 5,
                "duration": 20,
                "alpha_phase": 2,
                "learn_goal": True,
                "policy_type": "velocity",
                "weights_scale": 50,
            }
        }
    )
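# The loop above registers SimpleReacherDMP-v0/v1 and LongSimpleReacherDMP-v0/v1; the DMP is
# given 2 DoF for the short reacher and 5 DoF for the long one, matching "n_links" of the
# underlying step-based environments.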
register(
id='ViaPointReacherDMP-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": "alr_envs:ViaPointReacher-v0",
"wrappers": [ViaPointReacherMPWrapper],
"mp_kwargs": {
"num_dof": 5,
"num_basis": 5,
"duration": 2,
"learn_goal": True,
"alpha_phase": 2,
"policy_type": "velocity",
"weights_scale": 50,
}
}
)
## Hole Reacher
versions = ["v0", "v1", "v2"]
for v in versions:
    register(
        id=f'HoleReacherDMP-{v}',
        entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
        # max_episode_steps=1,
        kwargs={
            "name": f"alr_envs:HoleReacher-{v}",
            "wrappers": [HoleReacherMPWrapper],
            "mp_kwargs": {
                "num_dof": 5,
                "num_basis": 5,
                "duration": 2,
                "learn_goal": True,
                "alpha_phase": 2,
                "bandwidth_factor": 2,
                "policy_type": "velocity",
                "weights_scale": 50,
                "goal_scale": 0.1
            }
        }
    )

    register(
        id=f'HoleReacherDetPMP-{v}',
        entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
        kwargs={
            "name": f"alr_envs:HoleReacher-{v}",
            "wrappers": [HoleReacherMPWrapper],
            "mp_kwargs": {
                "num_dof": 5,
                "num_basis": 5,
                "duration": 2,
                "width": 0.025,
                "policy_type": "velocity",
                "weights_scale": 0.2,
                "zero_start": True
            }
        }
    )
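# The motion-primitive ids are episode-based: a single step() call rolls out the whole
# trajectory generated from the supplied parameters. A minimal sketch (assuming the flat
# parameter vector simply matches env.action_space, i.e. basis weights plus goal when
# "learn_goal" is set):
#
#     import gym
#     env = gym.make("alr_envs:HoleReacherDMP-v1")
#     env.reset()
#     params = env.action_space.sample()
#     _, episode_reward, done, info = env.step(params)  # done should be True after one call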
## Deep Mind Control Suite (DMC)
### Suite
# tasks = ["ball_in_cup-catch", "reacher-easy", "reacher-hard", "cartpole-balance", "cartpole-balance_sparse",
# "cartpole-swingup", "cartpole-swingup_sparse", "cartpole-two_poles", "cartpole-three_poles"]
# wrappers = [DMCBallInCupMPWrapper, DMCReacherMPWrapper, DMCReacherMPWrapper, DMCCartpoleMPWrapper,
# partial(DMCCartpoleMPWrapper)]
# for t, w in zip(tasks, wrappers):
register(
id=f'dmc_ball_in_cup-catch_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"ball_in_cup-catch",
"time_limit": 1,
"episode_length": 50,
"wrappers": [DMCBallInCupMPWrapper],
"mp_kwargs": {
"num_dof": 2,
"num_basis": 5,
"duration": 1,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_ball_in_cup-catch_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"ball_in_cup-catch",
"time_limit": 1,
"episode_length": 50,
"wrappers": [DMCBallInCupMPWrapper],
"mp_kwargs": {
"num_dof": 2,
"num_basis": 5,
"duration": 1,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
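# For the DMC-based ids, "name" is a dm_control "domain-task" pair rather than a gym id;
# "time_limit" and "episode_length" bound the rollout, and the "motor" policy type presumably
# tracks the generated trajectory with a PD controller parameterized by "p_gains"/"d_gains",
# whereas the classic-control envs above are driven directly via "velocity" commands.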
# TODO tune gains and episode length for all below
register(
id=f'dmc_reacher-easy_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"reacher-easy",
"time_limit": 1,
"episode_length": 50,
"wrappers": [DMCReacherMPWrapper],
"mp_kwargs": {
"num_dof": 2,
"num_basis": 5,
"duration": 1,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_reacher-easy_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"reacher-easy",
"time_limit": 1,
"episode_length": 50,
"wrappers": [DMCReacherMPWrapper],
"mp_kwargs": {
"num_dof": 2,
"num_basis": 5,
"duration": 1,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_reacher-hard_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"reacher-hard",
"time_limit": 1,
"episode_length": 50,
"wrappers": [DMCReacherMPWrapper],
"mp_kwargs": {
"num_dof": 2,
"num_basis": 5,
"duration": 1,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_reacher-hard_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"reacher-hard",
"time_limit": 1,
"episode_length": 50,
"wrappers": [DMCReacherMPWrapper],
"mp_kwargs": {
"num_dof": 2,
"num_basis": 5,
"duration": 1,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-balance_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"cartpole-balance",
# "time_limit": 1,
"episode_length": 1000,
"wrappers": [DMCCartpoleMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-balance_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"cartpole-balance",
# "time_limit": 1,
"episode_length": 1000,
"wrappers": [DMCCartpoleMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-balance_sparse_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"cartpole-balance_sparse",
# "time_limit": 1,
"episode_length": 1000,
"wrappers": [DMCCartpoleMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-balance_sparse_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"cartpole-balance_sparse",
# "time_limit": 1,
"episode_length": 1000,
"wrappers": [DMCCartpoleMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-swingup_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"cartpole-swingup",
# "time_limit": 1,
"episode_length": 1000,
"wrappers": [DMCCartpoleMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-swingup_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"cartpole-swingup",
# "time_limit": 1,
"episode_length": 1000,
"wrappers": [DMCCartpoleMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-swingup_sparse_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"cartpole-swingup_sparse",
# "time_limit": 1,
"episode_length": 1000,
"wrappers": [DMCCartpoleMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-swingup_sparse_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"cartpole-swingup_sparse",
# "time_limit": 1,
"episode_length": 1000,
"wrappers": [DMCCartpoleMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-two_poles_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"cartpole-two_poles",
# "time_limit": 1,
"episode_length": 1000,
# "wrappers": [partial(DMCCartpoleMPWrapper, n_poles=2)],
"wrappers": [DMCCartpoleTwoPolesMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-two_poles_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"cartpole-two_poles",
# "time_limit": 1,
"episode_length": 1000,
# "wrappers": [partial(DMCCartpoleMPWrapper, n_poles=2)],
"wrappers": [DMCCartpoleTwoPolesMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-three_poles_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"cartpole-three_poles",
# "time_limit": 1,
"episode_length": 1000,
# "wrappers": [partial(DMCCartpoleMPWrapper, n_poles=3)],
"wrappers": [DMCCartpoleThreePolesMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "motor",
"weights_scale": 50,
"goal_scale": 0.1,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
register(
id=f'dmc_cartpole-three_poles_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"cartpole-three_poles",
# "time_limit": 1,
"episode_length": 1000,
# "wrappers": [partial(DMCCartpoleMPWrapper, n_poles=3)],
"wrappers": [DMCCartpoleThreePolesMPWrapper],
"mp_kwargs": {
"num_dof": 1,
"num_basis": 5,
"duration": 10,
"width": 0.025,
"policy_type": "motor",
"weights_scale": 0.2,
"zero_start": True,
"policy_kwargs": {
"p_gains": 50,
"d_gains": 1
}
}
}
)
### Manipulation
register(
id=f'dmc_manipulation-reach_site_dmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_dmp_env_helper',
# max_episode_steps=1,
kwargs={
"name": f"manipulation-reach_site_features",
# "time_limit": 1,
"episode_length": 250,
"wrappers": [DMCReachSiteMPWrapper],
"mp_kwargs": {
"num_dof": 9,
"num_basis": 5,
"duration": 10,
"learn_goal": True,
"alpha_phase": 2,
"bandwidth_factor": 2,
"policy_type": "velocity",
"weights_scale": 50,
"goal_scale": 0.1,
}
}
)
register(
id=f'dmc_manipulation-reach_site_detpmp-v0',
entry_point='alr_envs.utils.make_env_helpers:make_detpmp_env_helper',
kwargs={
"name": f"manipulation-reach_site_features",
# "time_limit": 1,
"episode_length": 250,
"wrappers": [DMCReachSiteMPWrapper],
"mp_kwargs": {
"num_dof": 9,
"num_basis": 5,
"duration": 10,
"width": 0.025,
"policy_type": "velocity",
"weights_scale": 0.2,
"zero_start": True,
}
}
)
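# A quick way to list everything registered by this module (sketch, assuming the pre-0.22
# gym registry API):
#
#     import gym
#     import alr_envs  # noqa: F401 -- triggers the register() calls above
#     print([spec.id for spec in gym.envs.registry.all()
#            if "alr_envs" in str(spec.entry_point)])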