updated test to pytest
This commit is contained in:
parent 536e78da23
commit 2875e07947
@@ -1,127 +1,42 @@
-import unittest
-
-import gym
 import numpy as np
+import pytest
 
-from dm_control import suite, manipulation
-
-from alr_envs import make
+from dm_control import suite, manipulation
 
 DMC_ENVS = [f'{env}-{task}' for env, task in suite.ALL_TASKS if env != "lqr"]
 MANIPULATION_SPECS = [f'manipulation-{task}' for task in manipulation.ALL if task.endswith('_features')]
 SEED = 1
 
 
-class TestStepDMCEnvironments(unittest.TestCase):
-
-    def _run_env(self, env_id, iterations=None, seed=SEED, render=False):
-        """
-        Example for running a DMC based env in the step based setting.
-        The env_id has to be specified as `domain_name-task_name` or
-        for manipulation tasks as `manipulation-environment_name`
-
-        Args:
-            env_id: Either `domain_name-task_name` or `manipulation-environment_name`
-            iterations: Number of rollout steps to run
-            seed= random seeding
-            render: Render the episode
-
-        Returns:
-
-        """
-        env: gym.Env = make(env_id, seed=seed)
-        rewards = []
-        observations = []
-        dones = []
-        obs = env.reset()
-        self._verify_observations(obs, env.observation_space, "reset()")
-
-        length = env.spec.max_episode_steps
-        if iterations is None:
-            if length is None:
-                iterations = 1
-            else:
-                iterations = length
-
-        # number of samples(multiple environment steps)
-        for i in range(iterations):
-            observations.append(obs)
-
-            ac = env.action_space.sample()
-            # ac = np.random.uniform(env.action_space.low, env.action_space.high, env.action_space.shape)
-            obs, reward, done, info = env.step(ac)
-
-            self._verify_observations(obs, env.observation_space, "step()")
-            self._verify_reward(reward)
-            self._verify_done(done)
-
-            rewards.append(reward)
-            dones.append(done)
-
-            if render:
-                env.render("human")
-
-            if done:
-                obs = env.reset()
-
-        assert done, "Done flag is not True after max episode length."
-        observations.append(obs)
-        env.close()
-        del env
-        return np.array(observations), np.array(rewards), np.array(dones)
-
-    def _verify_observations(self, obs, observation_space, obs_type="reset()"):
-        self.assertTrue(observation_space.contains(obs),
-                        f"Observation {obs} received from {obs_type} "
-                        f"not contained in observation space {observation_space}.")
-
-    def _verify_reward(self, reward):
-        self.assertIsInstance(reward, float, f"Returned {reward} as reward, expected float.")
-
-    def _verify_done(self, done):
-        self.assertIsInstance(done, bool, f"Returned {done} as done flag, expected bool.")
-
-    def test_dmc_functionality(self):
-        """Tests that environments runs without errors using random actions."""
-        for env_id in DMC_ENVS:
-            with self.subTest(msg=env_id):
-                self._run_env(env_id)
-
-    def test_dmc_determinism(self):
-        """Tests that identical seeds produce identical trajectories."""
-        seed = 0
-        # Iterate over two trajectories, which should have the same state and action sequence
-        for env_id in DMC_ENVS:
-            with self.subTest(msg=env_id):
-                traj1 = self._run_env(env_id, seed=seed)
-                traj2 = self._run_env(env_id, seed=seed)
-                for i, time_step in enumerate(zip(*traj1, *traj2)):
-                    obs1, rwd1, done1, obs2, rwd2, done2 = time_step
-                    self.assertTrue(np.array_equal(obs1, obs2), f"Observations [{i}] {obs1} and {obs2} do not match.")
-                    self.assertEqual(rwd1, rwd2, f"Rewards [{i}] {rwd1} and {rwd2} do not match.")
-                    self.assertEqual(done1, done2, f"Dones [{i}] {done1} and {done2} do not match.")
-
-    def test_manipulation_functionality(self):
-        """Tests that environments runs without errors using random actions."""
-        for env_id in MANIPULATION_SPECS:
-            with self.subTest(msg=env_id):
-                self._run_env(env_id)
-
-    def test_manipulation_determinism(self):
-        """Tests that identical seeds produce identical trajectories."""
-        seed = 0
-        # Iterate over two trajectories, which should have the same state and action sequence
-        for env_id in MANIPULATION_SPECS:
-            with self.subTest(msg=env_id):
-                traj1 = self._run_env(env_id, seed=seed)
-                traj2 = self._run_env(env_id, seed=seed)
-                for i, time_step in enumerate(zip(*traj1, *traj2)):
-                    obs1, rwd1, done1, obs2, rwd2, done2 = time_step
-                    self.assertTrue(np.array_equal(obs1, obs2), f"Observations [{i}] {obs1} and {obs2} do not match.")
-                    self.assertEqual(rwd1, rwd2, f"Rewards [{i}] {rwd1} and {rwd2} do not match.")
-                    self.assertEqual(done1, done2, f"Dones [{i}] {done1} and {done2} do not match.")
-
-
-if __name__ == '__main__':
-    unittest.main()
+
+@pytest.mark.parametrize('env_id', DMC_ENVS)
+def test_dmc_functionality(self, env_id: str):
+    """Tests that environments runs without errors using random actions."""
+    self.run_env(env_id)
+
+
+@pytest.mark.parametrize('env_id', DMC_ENVS)
+def test_dmc_determinism(self, env_id: str):
+    """Tests that identical seeds produce identical trajectories."""
+    seed = 0
+    self._run_env_determinism(env_id, seed)
+
+
+@pytest.mark.parametrize('env_id', MANIPULATION_SPECS)
+def test_manipulation_functionality(self, env_id: str):
+    """Tests that environments runs without errors using random actions."""
+    self.run_env(env_id)
+
+
+@pytest.mark.parametrize('env_id', MANIPULATION_SPECS)
+def test_manipulation_determinism(self, env_id: str):
+    """Tests that identical seeds produce identical trajectories."""
+    seed = 0
+    # Iterate over two trajectories, which should have the same state and action sequence
+    traj1 = self.run_env(env_id, seed=seed)
+    traj2 = self.run_env(env_id, seed=seed)
+    for i, time_step in enumerate(zip(*traj1, *traj2)):
+        obs1, rwd1, done1, obs2, rwd2, done2 = time_step
+        assert np.array_equal(obs1, obs2), f"Observations [{i}] {obs1} and {obs2} do not match."
+        assert np.all(rwd1 == rwd2), f"Rewards [{i}] {rwd1} and {rwd2} do not match."
+        assert np.all(done1 == done2), f"Dones [{i}] {done1} and {done2} do not match."
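The conversion above follows one pattern: the unittest class with a `for env_id in ...: with self.subTest(msg=env_id):` loop becomes module-level functions decorated with `@pytest.mark.parametrize`, so pytest collects and reports one test per environment id. A minimal sketch of that pattern, assuming the shared `run_env` helper from `test/utils.py` added in this commit (the two ids are illustrative only, not the repo's generated lists):

import pytest

from test.utils import run_env  # helper added in this commit

EXAMPLE_ENV_IDS = ['cartpole-balance', 'reacher-easy']  # illustrative dm_control-style ids


@pytest.mark.parametrize('env_id', EXAMPLE_ENV_IDS)
def test_functionality(env_id: str):
    # Each env_id becomes its own collected test case instead of a subTest.
    run_env(env_id)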
@@ -1,143 +1,63 @@
-import unittest
+import fancy_gym
 
 import gym
-import numpy as np
+import pytest
 
-import alr_envs  # noqa
-from alr_envs.utils.make_env_helpers import make
+from test.utils import run_env
 
 ALL_SPECS = list(spec for spec in gym.envs.registry.all() if "alr_envs" in spec.entry_point)
 SEED = 1
 
 
-class TestMPEnvironments(unittest.TestCase):
-
-    def _run_env(self, env_id, iterations=None, seed=SEED, render=False):
-        """
-        Example for running a DMC based env in the step based setting.
-        The env_id has to be specified as `domain_name-task_name` or
-        for manipulation tasks as `manipulation-environment_name`
-
-        Args:
-            env_id: Either `domain_name-task_name` or `manipulation-environment_name`
-            iterations: Number of rollout steps to run
-            seed= random seeding
-            render: Render the episode
-
-        Returns:
-
-        """
-        env: gym.Env = make(env_id, seed=seed)
-        rewards = []
-        observations = []
-        dones = []
-        obs = env.reset()
-        self._verify_observations(obs, env.observation_space, "reset()")
-
-        length = env.spec.max_episode_steps
-        if iterations is None:
-            if length is None:
-                iterations = 1
-            else:
-                iterations = length
-
-        # number of samples(multiple environment steps)
-        for i in range(iterations):
-            observations.append(obs)
-
-            ac = env.action_space.sample()
-            # ac = np.random.uniform(env.action_space.low, env.action_space.high, env.action_space.shape)
-            obs, reward, done, info = env.step(ac)
-
-            self._verify_observations(obs, env.observation_space, "step()")
-            self._verify_reward(reward)
-            self._verify_done(done)
-
-            rewards.append(reward)
-            dones.append(done)
-
-            if render:
-                env.render("human")
-
-            if done:
-                obs = env.reset()
-
-        assert done, "Done flag is not True after max episode length."
-        observations.append(obs)
-        env.close()
-        del env
-        return np.array(observations), np.array(rewards), np.array(dones)
-
-    def _run_env_determinism(self, ids):
-        seed = 0
-        for env_id in ids:
-            with self.subTest(msg=env_id):
-                traj1 = self._run_env(env_id, seed=seed)
-                traj2 = self._run_env(env_id, seed=seed)
-                for i, time_step in enumerate(zip(*traj1, *traj2)):
-                    obs1, rwd1, done1, obs2, rwd2, done2 = time_step
-                    self.assertTrue(np.array_equal(obs1, obs2), f"Observations [{i}] {obs1} and {obs2} do not match.")
-                    self.assertEqual(rwd1, rwd2, f"Rewards [{i}] {rwd1} and {rwd2} do not match.")
-                    self.assertEqual(done1, done2, f"Dones [{i}] {done1} and {done2} do not match.")
-
-    def _verify_observations(self, obs, observation_space, obs_type="reset()"):
-        self.assertTrue(observation_space.contains(obs),
-                        f"Observation {obs} received from {obs_type} "
-                        f"not contained in observation space {observation_space}.")
-
-    def _verify_reward(self, reward):
-        self.assertIsInstance(reward, float, f"Returned {reward} as reward, expected float.")
-
-    def _verify_done(self, done):
-        self.assertIsInstance(done, bool, f"Returned {done} as done flag, expected bool.")
-
-    def test_alr_environment_functionality(self):
-        """Tests that environments runs without errors using random actions for ALR MP envs."""
-        with self.subTest(msg="DMP"):
-            for env_id in alr_envs.ALL_ALR_MOTION_PRIMITIVE_ENVIRONMENTS['DMP']:
-                with self.subTest(msg=env_id):
-                    self._run_env(env_id)
-
-        with self.subTest(msg="ProMP"):
-            for env_id in alr_envs.ALL_ALR_MOTION_PRIMITIVE_ENVIRONMENTS['ProMP']:
-                with self.subTest(msg=env_id):
-                    self._run_env(env_id)
+@pytest.mark.parametrize('env_id', fancy_gym.ALL_FANCY_MOVEMENT_PRIMITIVE_ENVIRONMENTS['DMP'])
+def test_custom_dmp_functionality(env_id):
+    """Tests that environments runs without errors using random actions for custom DMP envs."""
+    run_env(env_id)
+
+
+@pytest.mark.parametrize('env_id', fancy_gym.ALL_FANCY_MOVEMENT_PRIMITIVE_ENVIRONMENTS['ProMP'])
+def test_custom_promp_functionality(env_id):
+    """Tests that environments runs without errors using random actions for custom ProMP envs."""
+    run_env(env_id)
 
     def test_openai_environment_functionality(self):
         """Tests that environments runs without errors using random actions for OpenAI gym MP envs."""
         with self.subTest(msg="DMP"):
             for env_id in alr_envs.ALL_GYM_MOTION_PRIMITIVE_ENVIRONMENTS['DMP']:
                 with self.subTest(msg=env_id):
-                    self._run_env(env_id)
+                    self.run_env(env_id)
 
         with self.subTest(msg="ProMP"):
             for env_id in alr_envs.ALL_GYM_MOTION_PRIMITIVE_ENVIRONMENTS['ProMP']:
                 with self.subTest(msg=env_id):
-                    self._run_env(env_id)
+                    self.run_env(env_id)
 
     def test_dmc_environment_functionality(self):
         """Tests that environments runs without errors using random actions for DMC MP envs."""
         with self.subTest(msg="DMP"):
             for env_id in alr_envs.ALL_DEEPMIND_MOTION_PRIMITIVE_ENVIRONMENTS['DMP']:
                 with self.subTest(msg=env_id):
-                    self._run_env(env_id)
+                    self.run_env(env_id)
 
         with self.subTest(msg="ProMP"):
             for env_id in alr_envs.ALL_DEEPMIND_MOTION_PRIMITIVE_ENVIRONMENTS['ProMP']:
                 with self.subTest(msg=env_id):
-                    self._run_env(env_id)
+                    self.run_env(env_id)
 
     def test_metaworld_environment_functionality(self):
         """Tests that environments runs without errors using random actions for Metaworld MP envs."""
         with self.subTest(msg="DMP"):
             for env_id in alr_envs.ALL_METAWORLD_MOTION_PRIMITIVE_ENVIRONMENTS['DMP']:
                 with self.subTest(msg=env_id):
-                    self._run_env(env_id)
+                    self.run_env(env_id)
 
         with self.subTest(msg="ProMP"):
             for env_id in alr_envs.ALL_METAWORLD_MOTION_PRIMITIVE_ENVIRONMENTS['ProMP']:
                 with self.subTest(msg=env_id):
-                    self._run_env(env_id)
+                    self.run_env(env_id)
 
     def test_alr_environment_determinism(self):
         """Tests that identical seeds produce identical trajectories for ALR MP Envs."""
@@ -146,6 +66,7 @@ class TestMPEnvironments(unittest.TestCase):
         with self.subTest(msg="ProMP"):
             self._run_env_determinism(alr_envs.ALL_ALR_MOTION_PRIMITIVE_ENVIRONMENTS["ProMP"])
 
     def test_openai_environment_determinism(self):
        """Tests that identical seeds produce identical trajectories for OpenAI gym MP Envs."""
         with self.subTest(msg="DMP"):
@@ -153,6 +74,7 @@ class TestMPEnvironments(unittest.TestCase):
         with self.subTest(msg="ProMP"):
             self._run_env_determinism(alr_envs.ALL_GYM_MOTION_PRIMITIVE_ENVIRONMENTS["ProMP"])
 
     def test_dmc_environment_determinism(self):
         """Tests that identical seeds produce identical trajectories for DMC MP Envs."""
         with self.subTest(msg="DMP"):
@@ -160,13 +82,10 @@ class TestMPEnvironments(unittest.TestCase):
         with self.subTest(msg="ProMP"):
             self._run_env_determinism(alr_envs.ALL_DEEPMIND_MOTION_PRIMITIVE_ENVIRONMENTS["ProMP"])
 
     def test_metaworld_environment_determinism(self):
         """Tests that identical seeds produce identical trajectories for Metaworld MP Envs."""
         with self.subTest(msg="DMP"):
             self._run_env_determinism(alr_envs.ALL_METAWORLD_MOTION_PRIMITIVE_ENVIRONMENTS["DMP"])
         with self.subTest(msg="ProMP"):
             self._run_env_determinism(alr_envs.ALL_METAWORLD_MOTION_PRIMITIVE_ENVIRONMENTS["ProMP"])
-
-
-if __name__ == '__main__':
-    unittest.main()
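The other half of the pattern is replacing unittest assertion methods with plain `assert` statements, which pytest rewrites to show the compared values on failure. Roughly, the trajectory comparison becomes something like the sketch below; `check_step` is a hypothetical name introduced here for illustration, while the committed code inlines these asserts in the test body:

import numpy as np


def check_step(i, obs1, rwd1, done1, obs2, rwd2, done2):
    # Replaces self.assertTrue / self.assertEqual from the unittest version.
    assert np.array_equal(obs1, obs2), f"Observations [{i}] {obs1} and {obs2} do not match."
    assert np.all(rwd1 == rwd2), f"Rewards [{i}] {rwd1} and {rwd2} do not match."
    assert np.all(done1 == done2), f"Dones [{i}] {done1} and {done2} do not match."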
test/utils.py (new file, 84 lines)
@@ -0,0 +1,84 @@
+import gym
+import numpy as np
+
+from fancy_gym import make
+
+
+def run_env(env_id, iterations=None, seed=SEED, render=False):
+    """
+    Example for running a DMC based env in the step based setting.
+    The env_id has to be specified as `domain_name-task_name` or
+    for manipulation tasks as `manipulation-environment_name`
+
+    Args:
+        env_id: Either `domain_name-task_name` or `manipulation-environment_name`
+        iterations: Number of rollout steps to run
+        seed= random seeding
+        render: Render the episode
+
+    Returns:
+
+    """
+    env: gym.Env = make(env_id, seed=seed)
+    rewards = []
+    observations = []
+    dones = []
+    obs = env.reset()
+    _verify_observations(obs, env.observation_space, "reset()")
+
+    length = env.spec.max_episode_steps
+    if iterations is None:
+        if length is None:
+            iterations = 1
+        else:
+            iterations = length
+
+    # number of samples(multiple environment steps)
+    for i in range(iterations):
+        observations.append(obs)
+
+        ac = env.action_space.sample()
+        # ac = np.random.uniform(env.action_space.low, env.action_space.high, env.action_space.shape)
+        obs, reward, done, info = env.step(ac)
+
+        _verify_observations(obs, env.observation_space, "step()")
+        _verify_reward(reward)
+        _verify_done(done)
+
+        rewards.append(reward)
+        dones.append(done)
+
+        if render:
+            env.render("human")
+
+        if done:
+            obs = env.reset()
+
+    assert done, "Done flag is not True after max episode length."
+    observations.append(obs)
+    env.close()
+    del env
+    return np.array(observations), np.array(rewards), np.array(dones)
+
+
+def _run_env_determinism(self, env_id: str, seed: int):
+    traj1 = self.run_env(env_id, seed=seed)
+    traj2 = self.run_env(env_id, seed=seed)
+    # Iterate over two trajectories, which should have the same state and action sequence
+    for i, time_step in enumerate(zip(*traj1, *traj2)):
+        obs1, rwd1, done1, obs2, rwd2, done2 = time_step
+        self.assertTrue(np.array_equal(obs1, obs2), f"Observations [{i}] {obs1} and {obs2} do not match.")
+        self.assertEqual(rwd1, rwd2, f"Rewards [{i}] {rwd1} and {rwd2} do not match.")
+        self.assertEqual(done1, done2, f"Dones [{i}] {done1} and {done2} do not match.")
+
+
+def _verify_observations(obs, observation_space, obs_type="reset()"):
+    assert observation_space.contains(obs), \
+        f"Observation {obs} received from {obs_type} not contained in observation space {observation_space}."
+
+
+def _verify_reward(reward):
+    assert isinstance(reward, float), f"Returned {reward} as reward, expected float."
+
+
+def _verify_done(done):
+    assert isinstance(done, bool), f"Returned {done} as done flag, expected bool."
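As committed, `_run_env_determinism` still takes a `self` parameter and `run_env`'s `seed=SEED` default references a `SEED` constant that this file does not define, so a standalone sketch of the same determinism check, under those assumptions, looks roughly like this (the module-level variant and explicit `seed` argument are hypothetical, not the committed code):

import numpy as np

from test.utils import run_env


def run_env_determinism(env_id: str, seed: int = 0):
    # Roll out twice with the same seed; identical seeds should give identical trajectories.
    traj1 = run_env(env_id, seed=seed)
    traj2 = run_env(env_id, seed=seed)
    for i, (obs1, rwd1, done1, obs2, rwd2, done2) in enumerate(zip(*traj1, *traj2)):
        assert np.array_equal(obs1, obs2), f"Observations [{i}] do not match."
        assert rwd1 == rwd2, f"Rewards [{i}] do not match."
        assert done1 == done2, f"Dones [{i}] do not match."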