MindNetML committed
Commit a2ccce1 (1 parent: 4ce6f05)

Upload config.json with huggingface_hub

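As the commit message says, the file was pushed with the huggingface_hub client. A minimal sketch of how such an upload can be done; the repo id and local file path below are assumptions for illustration, not values taken from this commit:

# Sketch only: uploading a Sample Factory config.json to a Hub model repo with huggingface_hub.
# repo_id and the local path are hypothetical; the local path mirrors the config's train_dir.
from huggingface_hub import HfApi

api = HfApi()
api.upload_file(
    path_or_fileobj="/content/train_dir/default_experiment/config.json",  # local config produced by training
    path_in_repo="config.json",                                           # destination filename in the repo
    repo_id="MindNetML/doom_health_gathering_supreme",                    # hypothetical repo id
    repo_type="model",
    commit_message="Upload config.json with huggingface_hub",
)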
Files changed (1)
  1. config.json +141 -0
config.json ADDED
@@ -0,0 +1,141 @@
{
  "help": false,
  "algo": "APPO",
  "env": "doom_health_gathering_supreme",
  "experiment": "default_experiment",
  "train_dir": "/content/train_dir",
  "restart_behavior": "resume",
  "device": "gpu",
  "seed": null,
  "num_policies": 1,
  "async_rl": true,
  "serial_mode": false,
  "batched_sampling": false,
  "num_batches_to_accumulate": 2,
  "worker_num_splits": 2,
  "policy_workers_per_policy": 1,
  "max_policy_lag": 1000,
  "num_workers": 8,
  "num_envs_per_worker": 4,
  "batch_size": 1024,
  "num_batches_per_epoch": 1,
  "num_epochs": 1,
  "rollout": 32,
  "recurrence": 32,
  "shuffle_minibatches": false,
  "gamma": 0.99,
  "reward_scale": 1.0,
  "reward_clip": 1000.0,
  "value_bootstrap": false,
  "normalize_returns": true,
  "exploration_loss_coeff": 0.001,
  "value_loss_coeff": 0.5,
  "kl_loss_coeff": 0.0,
  "exploration_loss": "symmetric_kl",
  "gae_lambda": 0.95,
  "ppo_clip_ratio": 0.1,
  "ppo_clip_value": 0.2,
  "with_vtrace": false,
  "vtrace_rho": 1.0,
  "vtrace_c": 1.0,
  "optimizer": "adam",
  "adam_eps": 1e-06,
  "adam_beta1": 0.9,
  "adam_beta2": 0.999,
  "max_grad_norm": 4.0,
  "learning_rate": 0.0001,
  "lr_schedule": "constant",
  "lr_schedule_kl_threshold": 0.008,
  "obs_subtract_mean": 0.0,
  "obs_scale": 255.0,
  "normalize_input": true,
  "normalize_input_keys": null,
  "decorrelate_experience_max_seconds": 0,
  "decorrelate_envs_on_one_worker": true,
  "actor_worker_gpus": [],
  "set_workers_cpu_affinity": true,
  "force_envs_single_thread": false,
  "default_niceness": 0,
  "log_to_file": true,
  "experiment_summaries_interval": 10,
  "flush_summaries_interval": 30,
  "stats_avg": 100,
  "summaries_use_frameskip": true,
  "heartbeat_interval": 20,
  "heartbeat_reporting_interval": 600,
  "train_for_env_steps": 4000000,
  "train_for_seconds": 10000000000,
  "save_every_sec": 120,
  "keep_checkpoints": 2,
  "load_checkpoint_kind": "latest",
  "save_milestones_sec": -1,
  "save_best_every_sec": 5,
  "save_best_metric": "reward",
  "save_best_after": 100000,
  "benchmark": false,
  "encoder_mlp_layers": [
    512,
    512
  ],
  "encoder_conv_architecture": "convnet_simple",
  "encoder_conv_mlp_layers": [
    512
  ],
  "use_rnn": true,
  "rnn_size": 512,
  "rnn_type": "gru",
  "rnn_num_layers": 1,
  "decoder_mlp_layers": [],
  "nonlinearity": "elu",
  "policy_initialization": "orthogonal",
  "policy_init_gain": 1.0,
  "actor_critic_share_weights": true,
  "adaptive_stddev": true,
  "continuous_tanh_scale": 0.0,
  "initial_stddev": 1.0,
  "use_env_info_cache": false,
  "env_gpu_actions": false,
  "env_gpu_observations": true,
  "env_frameskip": 4,
  "env_framestack": 1,
  "pixel_format": "CHW",
  "use_record_episode_statistics": false,
  "with_wandb": false,
  "wandb_user": null,
  "wandb_project": "sample_factory",
  "wandb_group": null,
  "wandb_job_type": "SF",
  "wandb_tags": [],
  "with_pbt": false,
  "pbt_mix_policies_in_one_env": true,
  "pbt_period_env_steps": 5000000,
  "pbt_start_mutation": 20000000,
  "pbt_replace_fraction": 0.3,
  "pbt_mutation_rate": 0.15,
  "pbt_replace_reward_gap": 0.1,
  "pbt_replace_reward_gap_absolute": 1e-06,
  "pbt_optimize_gamma": false,
  "pbt_target_objective": "true_objective",
  "pbt_perturb_min": 1.1,
  "pbt_perturb_max": 1.5,
  "num_agents": -1,
  "num_humans": 0,
  "num_bots": -1,
  "start_bot_difficulty": null,
  "timelimit": null,
  "res_w": 128,
  "res_h": 72,
  "wide_aspect_ratio": false,
  "eval_env_frameskip": 1,
  "fps": 35,
  "command_line": "--env=doom_health_gathering_supreme --num_workers=8 --num_envs_per_worker=4 --train_for_env_steps=4000000",
  "cli_args": {
    "env": "doom_health_gathering_supreme",
    "num_workers": 8,
    "num_envs_per_worker": 4,
    "train_for_env_steps": 4000000
  },
  "git_hash": "unknown",
  "git_repo_name": "not a git repository",
  "train_script": ".usr.local.lib.python3.10.dist-packages.ipykernel_launcher"
}
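This config fully describes the APPO run (environment, worker layout, optimizer, and network settings), and the command_line / cli_args fields record the exact CLI invocation used for training, so the run can be inspected or reproduced later. A minimal sketch of pulling the file back from the Hub and reading a few key settings; the repo id is again an assumption for illustration:

# Sketch only: downloading this config.json from the Hub and inspecting the main APPO settings.
# The repo_id is hypothetical.
import json

from huggingface_hub import hf_hub_download

config_path = hf_hub_download(
    repo_id="MindNetML/doom_health_gathering_supreme",  # hypothetical repo id
    filename="config.json",
)

with open(config_path) as f:
    cfg = json.load(f)

# A few of the settings that define this run.
print(cfg["algo"], cfg["env"])                                            # APPO doom_health_gathering_supreme
print("workers:", cfg["num_workers"], "envs/worker:", cfg["num_envs_per_worker"])
print("train_for_env_steps:", cfg["train_for_env_steps"])
print("original CLI:", cfg["command_line"])                               # reusable to rerun the same training setup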