# dppo/cfg/d3il/eval/avoid_m1/eval_diffusion_mlp.yaml
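# Evaluation config for a pre-trained Diffusion-MLP policy on the D3IL "avoiding"
# task (m1 pretraining data). A typical invocation might look like the following;
# the entry-point path and override syntax are assumptions, not taken from this file:
#   python script/run.py --config-name=eval_diffusion_mlp \
#     --config-dir=cfg/d3il/eval/avoid_m1 base_policy_path=<path/to/checkpoint>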

defaults:
  - _self_
hydra:
  run:
    dir: ${logdir}
_target_: agent.eval.eval_diffusion_agent.EvalDiffusionAgent
name: ${env_name}_eval_diffusion_mlp_ta${horizon_steps}_td${denoising_steps}
logdir: ${oc.env:DPPO_LOG_DIR}/d3il-eval/${name}/${now:%Y-%m-%d}_${now:%H-%M-%S}_${seed}
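# base_policy_path is left empty on purpose; it is normally supplied as an override
# pointing at the pre-trained diffusion checkpoint to evaluate.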
base_policy_path:
normalization_path: ${oc.env:DPPO_DATA_DIR}/d3il/avoid_m1/normalization.npz
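# Core dimensions and sampling settings for the block below (brief notes, inferred
# from the key names and common dppo conventions rather than stated in this file):
#   obs_dim / action_dim - low-dim observation and 2-D action of the avoiding task
#   denoising_steps      - diffusion denoising steps used when sampling actions
#   cond_steps           - past observation steps the policy conditions on
#   horizon_steps        - length of each predicted action chunk
#   act_steps            - predicted actions actually executed per policy query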
seed: 42
device: cuda:0
env_name: avoiding-m5
obs_dim: 4
action_dim: 2
denoising_steps: 20
cond_steps: 1
horizon_steps: 4
act_steps: 4
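# ft_denoising_steps: 0 means no fine-tuned denoising steps, i.e. the base
# pre-trained policy is evaluated as-is.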
ft_denoising_steps: 0
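# Rollout budget: 25 agent steps of act_steps=4 actions each cover one full
# 100-step episode; render_num presumably selects how many of the 40 envs are plotted.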
n_steps: 25
render_num: 40
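# Trajectory plotter for the avoid task; it is given the same normalization stats,
# presumably to plot trajectories in the original (unnormalized) coordinates.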
plotter:
  _target_: env.plot_traj.TrajPlotter
  env_type: avoid
  normalization_path: ${normalization_path}
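# Vectorized evaluation environment: 40 parallel envs, episodes capped at 100 steps,
# and (presumably) an episode reward above 2 counted as a success. Full observations
# are saved to support trajectory plotting.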
env:
  n_envs: 40
  name: ${env_name}
  max_episode_steps: 100
  reset_at_iteration: True
  save_video: False
  best_reward_threshold_for_success: 2
  save_full_observations: True
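  # Wrappers (assumed semantics): d3il_lowdim normalizes the low-dim observations
  # with the stats above; multi_step stacks cond_steps observations and executes
  # act_steps actions per policy query.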
  wrappers:
    d3il_lowdim:
      normalization_path: ${normalization_path}
    multi_step:
      n_obs_steps: ${cond_steps}
      n_action_steps: ${act_steps}
      max_episode_steps: ${env.max_episode_steps}
      pass_full_observations: ${env.save_full_observations}
      reset_within_step: False
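# Evaluation-time diffusion model: DiffusionEval wraps the pre-trained network loaded
# from network_path; predict_epsilon means the network predicts the added noise, and
# denoised_clip_value clips the denoised actions (my reading of the keys).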
model:
  _target_: model.diffusion.diffusion_eval_ft.DiffusionEval
  ft_denoising_steps: ${ft_denoising_steps}
  predict_epsilon: True
  denoised_clip_value: 1.0
  #
  network_path: ${base_policy_path}
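  # Score network: residual MLP with three 512-unit layers, a 16-dim time embedding,
  # and a conditioning input of obs_dim * cond_steps flattened observations.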
  network:
    _target_: model.diffusion.mlp_diffusion.DiffusionMLP
    time_dim: 16
    mlp_dims: [512, 512, 512]
    activation_type: ReLU
    residual_style: True
    cond_dim: ${eval:'${obs_dim} * ${cond_steps}'}
    horizon_steps: ${horizon_steps}
    action_dim: ${action_dim}
  horizon_steps: ${horizon_steps}
  obs_dim: ${obs_dim}
  action_dim: ${action_dim}
  denoising_steps: ${denoising_steps}
  device: ${device}