behaviors:
  Pyramids:
    behavioral_cloning: null
    checkpoint_interval: 500000
    hyperparameters: {batch_size: 128, beta: 0.01, beta_schedule: linear, buffer_size: 2048, epsilon: 0.2, epsilon_schedule: linear, lambd: 0.95, learning_rate: 0.0003, learning_rate_schedule: linear, num_epoch: 3}
    init_path: null
    keep_checkpoints: 5
    max_steps: 1000000
    network_settings: {deterministic: false, goal_conditioning_type: hyper, hidden_units: 512, memory: null, normalize: false, num_layers: 2, vis_encode_type: simple}
    reward_signals:
      extrinsic:
        gamma: 0.99
        network_settings: {deterministic: false, goal_conditioning_type: hyper, hidden_units: 128, memory: null, normalize: false, num_layers: 2, vis_encode_type: simple}
        strength: 1.0
      rnd:
        encoding_size: null
        gamma: 0.99
        learning_rate: 0.0001
        network_settings: {deterministic: false, goal_conditioning_type: hyper, hidden_units: 64, memory: null, normalize: false, num_layers: 3, vis_encode_type: simple}
        strength: 0.01
    self_play: null
    summary_freq: 30000
    threaded: false
    time_horizon: 128
    trainer_type: ppo
checkpoint_settings: {force: false, inference: false, initialize_from: null, load_model: false, results_dir: results, resume: false, run_id: Pyramids Training, train_model: false}
debug: false
default_settings: null
engine_settings: {capture_frame_rate: 60, height: 84, no_graphics: true, quality_level: 5, target_frame_rate: -1, time_scale: 20, width: 84}
env_settings: {base_port: 5005, env_args: null, env_path: ./trained-envs-executables/linux/Pyramids/Pyramids, max_lifetime_restarts: 10, num_areas: 1, num_envs: 1, restarts_rate_limit_n: 1, restarts_rate_limit_period_s: 60, seed: -1}
environment_parameters: null
torch_settings: {device: null}
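These are the run options the ML-Agents trainer uses for the Pyramids behavior: PPO with an extrinsic reward plus an RND curiosity signal, a 512-unit two-layer policy network, and the environment executable and run id listed under env_settings and checkpoint_settings (training is normally launched by pointing the mlagents-learn CLI at a config file matching these values, together with its --env, --run-id, and --no-graphics options). As a minimal sketch, the same options can be inspected from Python with PyYAML, assuming the configuration above is saved to a file; the path ./config/ppo/PyramidsRND.yaml is a hypothetical name used only for this example.

# Sketch: read the run configuration back and check a few of the values above.
# Assumes PyYAML is installed and the YAML was saved to the hypothetical path below.
import yaml

with open("./config/ppo/PyramidsRND.yaml") as f:
    run_options = yaml.safe_load(f)

pyramids = run_options["behaviors"]["Pyramids"]
print(pyramids["trainer_type"])                       # ppo
print(pyramids["hyperparameters"]["learning_rate"])   # 0.0003
print(pyramids["reward_signals"]["rnd"]["strength"])  # 0.01
print(run_options["env_settings"]["env_path"])        # ./trained-envs-executables/linux/Pyramids/Pyramids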