exp_config = {
    'env': {
        'manager': {
            'episode_num': float("inf"),
            'max_retry': 1,
            'retry_type': 'reset',
            'auto_reset': True,
            'step_timeout': None,
            'reset_timeout': None,
            'retry_waiting_time': 0.1,
            'cfg_type': 'BaseEnvManagerDict'
        },
        'stop_value': 6000,
        'n_evaluator_episode': 8,
        'env_id': 'Walker2d-v3',
        'norm_obs': {
            'use_norm': False
        },
        'norm_reward': {
            'use_norm': False
        },
        'collector_env_num': 1,
        'evaluator_env_num': 8,
        'env_wrapper': 'mujoco_default'
    },
    'policy': {
        'model': {
            'obs_shape': 17,
            'action_shape': 6,
            'twin_critic': False,
            'actor_head_hidden_size': 256,
            'critic_head_hidden_size': 256,
            'action_space': 'regression'
        },
        'learn': {
            'learner': {
                'train_iterations': 1000000000,
                'dataloader': {
                    'num_workers': 0
                },
                'log_policy': True,
                'hook': {
                    'load_ckpt_before_run': '',
                    'log_show_after_iter': 100,
                    'save_ckpt_after_iter': 10000,
                    'save_ckpt_after_run': True
                },
                'cfg_type': 'BaseLearnerDict'
            },
            'update_per_collect': 1,
            'batch_size': 256,
            'learning_rate_actor': 0.001,
            'learning_rate_critic': 0.001,
            'ignore_done': False,
            'target_theta': 0.005,
            'discount_factor': 0.99,
            'actor_update_freq': 1,
            'noise': False
        },
        'collect': {
            'collector': {},
            'unroll_len': 1,
            'noise_sigma': 0.1,
            'n_sample': 1
        },
        'eval': {
            'evaluator': {
                'eval_freq': 5000,
                'render': {
                    'render_freq': -1,
                    'mode': 'train_iter'
                },
                'figure_path': None,
                'cfg_type': 'InteractionSerialEvaluatorDict',
                'stop_value': 6000,
                'n_episode': 8
            }
        },
        'other': {
            'replay_buffer': {
                'replay_buffer_size': 1000000
            }
        },
        'on_policy': False,
        'cuda': True,
        'multi_gpu': False,
        'bp_update_sync': True,
        'traj_len_inf': False,
        'type': 'ddpg',
        'priority': False,
        'priority_IS_weight': False,
        'random_collect_size': 25000,
        'transition_with_policy_data': False,
        'action_space': 'continuous',
        'reward_batch_norm': False,
        'multi_agent': False,
        'cfg_type': 'DDPGPolicyDict'
    },
    'exp_name': 'Walker2d-v3-DDPG',
    'seed': 0,
    'wandb_logger': {
        'gradient_logger': True,
        'video_logger': True,
        'plot_logger': True,
        'action_logger': True,
        'return_logger': False
    }
}
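The dict above is the compiled total config that DI-engine records for the Walker2d-v3 DDPG experiment (exp_name 'Walker2d-v3-DDPG'). As a minimal launch sketch, the snippet below shows how such an experiment is typically started from the zoo's (main, create) config pair via DI-engine's serial_pipeline entry point; the dizoo.mujoco.config.walker2d_ddpg_config module path and the exact field overrides are assumptions based on the usual DI-engine zoo layout, not something stated in the dump itself.

    # Minimal sketch, assuming DI-engine's zoo ships a Walker2d DDPG config
    # under this module path and that serial_pipeline accepts a (main, create) pair.
    from ding.entry import serial_pipeline
    from dizoo.mujoco.config.walker2d_ddpg_config import (
        walker2d_ddpg_config,         # main config: env + policy hyperparameters
        walker2d_ddpg_create_config,  # create config: which env/policy classes to build
    )

    def main():
        # Optionally override fields before compilation; the keys mirror the dump above.
        walker2d_ddpg_config.exp_name = 'Walker2d-v3-DDPG'
        walker2d_ddpg_config.policy.learn.batch_size = 256
        walker2d_ddpg_config.policy.random_collect_size = 25000

        # serial_pipeline compiles the pair into a total config like exp_config above,
        # then alternates collect -> learn -> eval until the evaluator return reaches
        # stop_value (6000) or the run is stopped manually.
        serial_pipeline((walker2d_ddpg_config, walker2d_ddpg_create_config), seed=0)

    if __name__ == '__main__':
        main()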