Upload folder using huggingface_hub
- .gitattributes +17 -0
- .summary/0/events.out.tfevents.1719943476.174d47ed6df0 +3 -0
- README.md +42 -0
- behaviors_altar_100x100.gif +0 -0
- behaviors_altar_100x100.mp4 +0 -0
- behaviors_resources_100x10.gif +3 -0
- behaviors_resources_100x10.mp4 +3 -0
- behaviors_resources_100x100.gif +3 -0
- behaviors_resources_100x100.mp4 +0 -0
- behaviors_resources_100x25.gif +3 -0
- behaviors_resources_100x25.mp4 +3 -0
- behaviors_resources_10x10.gif +3 -0
- behaviors_resources_10x10.mp4 +0 -0
- behaviors_resources_25x25.gif +3 -0
- behaviors_resources_25x25.mp4 +3 -0
- behaviors_resources_competition_10x10_2a.gif +3 -0
- behaviors_resources_competition_10x10_2a.mp4 +3 -0
- behaviors_resources_competition_25x25_2a.gif +3 -0
- behaviors_resources_competition_25x25_2a.mp4 +3 -0
- behaviors_resources_competition_25x25_3a.gif +3 -0
- behaviors_resources_competition_25x25_3a.mp4 +3 -0
- behaviors_resources_move_cost_10x10.gif +3 -0
- behaviors_resources_move_cost_10x10.mp4 +0 -0
- behaviors_resources_move_cost_25x25.gif +3 -0
- behaviors_resources_move_cost_25x25.mp4 +3 -0
- checkpoint_p0/best_000033451_548061184_reward_1.146.pth +3 -0
- checkpoint_p0/checkpoint_000041944_687210496.pth +3 -0
- checkpoint_p0/checkpoint_000042310_693207040.pth +3 -0
- config.json +164 -0
- git.diff +0 -0
- replay.gif +0 -0
- replay.mp4 +0 -0
- sf_log.txt +0 -0
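
For reference, a commit like this one is normally produced with `HfApi.upload_folder` from the `huggingface_hub` library. A minimal sketch, assuming a local experiment folder (the path below is a placeholder, not taken from this repository):

```
# Sketch only: folder_path is a hypothetical local directory.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./train_dir/sample_factory/p2.sf.1",  # placeholder local path
    repo_id="metta-ai/baseline.sf.lstm.v0.6.1",
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```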
.gitattributes
CHANGED
@@ -33,3 +33,20 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_100x10.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_100x10.mp4 filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_100x100.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_100x25.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_100x25.mp4 filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_10x10.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_25x25.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_25x25.mp4 filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_competition_10x10_2a.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_competition_10x10_2a.mp4 filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_competition_25x25_2a.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_competition_25x25_2a.mp4 filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_competition_25x25_3a.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_competition_25x25_3a.mp4 filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_move_cost_10x10.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_move_cost_25x25.gif filter=lfs diff=lfs merge=lfs -text
+behaviors_resources_move_cost_25x25.mp4 filter=lfs diff=lfs merge=lfs -text
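
Each added line above routes one file or pattern to Git LFS, so the repository stores small pointer files instead of the binaries themselves. A rough illustration of how such patterns select files (gitattributes matching is gitignore-style; `fnmatch` only approximates it for simple patterns like these):

```
# Rough illustration only: fnmatch approximates gitattributes matching
# for simple patterns such as the ones added above.
from fnmatch import fnmatch

lfs_patterns = ["*.zip", "*.zst", "*tfevents*",
                "behaviors_resources_100x10.gif"]
files = ["replay.mp4",
         "behaviors_resources_100x10.gif",
         ".summary/0/events.out.tfevents.1719943476.174d47ed6df0"]

for name in files:
    tracked = any(fnmatch(name, pattern) for pattern in lfs_patterns)
    print(name, "-> LFS" if tracked else "-> regular git")
```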
.summary/0/events.out.tfevents.1719943476.174d47ed6df0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:47016c3757d1440d2ca13b7f6bc4e615041254aedaad46f05bedecfd06b4c3e3
size 14980993
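
The three lines above are a Git LFS pointer: the repository records only the object's sha256 and byte size, while the payload lives in LFS storage. A minimal sketch for verifying a locally materialized copy against this pointer (the local path is assumed; `hashlib` and `os` are standard library):

```
# Sketch: verify a downloaded file against the LFS pointer above.
import hashlib, os

path = ".summary/0/events.out.tfevents.1719943476.174d47ed6df0"  # assumed local copy
expected_oid = "47016c3757d1440d2ca13b7f6bc4e615041254aedaad46f05bedecfd06b4c3e3"
expected_size = 14980993

digest = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        digest.update(chunk)

assert os.path.getsize(path) == expected_size
assert digest.hexdigest() == expected_oid
print("file matches the LFS pointer")
```

The same check applies to every LFS-tracked file listed below.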
README.md
ADDED
@@ -0,0 +1,42 @@
---
library_name: sample-factory
tags:
- deep-reinforcement-learning
- reinforcement-learning
- sample-factory
---

An **APPO** model trained on the **GDY-MettaGrid** environment.

This model was trained using Sample-Factory 2.0: https://github.com/alex-petrenko/sample-factory.
Documentation for how to use Sample-Factory can be found at https://www.samplefactory.dev/

## Downloading the model

After installing Sample-Factory, download the model with:
```
python -m sample_factory.huggingface.load_from_hub -r metta-ai/baseline.sf.lstm.v0.6.1
```

## Using the model

To run the model after download, use the `enjoy` script corresponding to this environment:
```
python -m <path.to.enjoy.module> --algo=APPO --env=GDY-MettaGrid --train_dir=./train_dir --experiment=baseline.sf.lstm.v0.6.1
```

You can also upload models to the Hugging Face Hub using the same script with the `--push_to_hub` flag.
See https://www.samplefactory.dev/10-huggingface/huggingface/ for more details.

## Training with this model

To continue training with this model, use the `train` script corresponding to this environment:
```
python -m <path.to.train.module> --algo=APPO --env=GDY-MettaGrid --train_dir=./train_dir --experiment=baseline.sf.lstm.v0.6.1 --restart_behavior=resume --train_for_env_steps=10000000000
```

Note: you may have to adjust `--train_for_env_steps` to a suitably high number, as the experiment will resume from the step count at which it previously stopped.
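
If you would rather fetch the files without the Sample-Factory CLI shown in the card, the whole repository can be pulled with `huggingface_hub` directly. A hedged sketch; the `local_dir` layout is an assumption about where you keep experiments, not something the card specifies:

```
# Sketch: download the full repository snapshot with huggingface_hub.
from huggingface_hub import snapshot_download

local_path = snapshot_download(
    repo_id="metta-ai/baseline.sf.lstm.v0.6.1",
    local_dir="./train_dir/baseline.sf.lstm.v0.6.1",  # assumed target layout
)
print("downloaded to", local_path)
```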
behaviors_altar_100x100.gif
ADDED
behaviors_altar_100x100.mp4
ADDED
Binary file (420 kB)
behaviors_resources_100x10.gif
ADDED
Git LFS Details
behaviors_resources_100x10.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:46b572f6dadafef9901c19d37d265308b7cce3fbfcea8cf45c5ac86be59a6fc2
size 1590970
behaviors_resources_100x100.gif
ADDED
Git LFS Details
behaviors_resources_100x100.mp4
ADDED
Binary file (438 kB)
behaviors_resources_100x25.gif
ADDED
Git LFS Details
behaviors_resources_100x25.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:52c5a7ad3f98e109c5930dc7f39bfbe7aba55e5733dcfe3b3af18493e4539a80
size 2334233
behaviors_resources_10x10.gif
ADDED
Git LFS Details
behaviors_resources_10x10.mp4
ADDED
Binary file (515 kB)
behaviors_resources_25x25.gif
ADDED
Git LFS Details
behaviors_resources_25x25.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:cb4af48769f34c6ba166c301683a193ec4ba3793677c6e81684b6ec247c0d910
size 2889776
behaviors_resources_competition_10x10_2a.gif
ADDED
Git LFS Details
behaviors_resources_competition_10x10_2a.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a016bcbb7a7d55718204f17496b3ec738e222182f37b08b428833eb87119885a
size 1051422
behaviors_resources_competition_25x25_2a.gif
ADDED
Git LFS Details
behaviors_resources_competition_25x25_2a.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:59ed84173f8923436dd461880d5a1a5cd038138f3114d4321e424bfd9b3910c6
size 3712550
behaviors_resources_competition_25x25_3a.gif
ADDED
Git LFS Details
behaviors_resources_competition_25x25_3a.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:90f44d65aaba64d45997562fd51ca28c4af66f1b344ddfd2980d20493a452d9a
size 4957715
behaviors_resources_move_cost_10x10.gif
ADDED
Git LFS Details
behaviors_resources_move_cost_10x10.mp4
ADDED
Binary file (587 kB)
behaviors_resources_move_cost_25x25.gif
ADDED
Git LFS Details
behaviors_resources_move_cost_25x25.mp4
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:796ee6e8e96e04a26e2a964b51add9d476e11a7724990bb7103a9f411c1a5f39
size 1722045
checkpoint_p0/best_000033451_548061184_reward_1.146.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:4a15d75c054fed9e3dbb64a351d130847732aedc686afa4a045dd755ce1c7f59
size 39046330
checkpoint_p0/checkpoint_000041944_687210496.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e237c56621b243fd254fcd0ba03d1299050cc8609fa05753d98678cd6694a30a
size 39047770
checkpoint_p0/checkpoint_000042310_693207040.pth
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:a1e953f240983b31e883d0ba65a71e9fe32414b58a3ddb33fd1114f27d095a4e
size 39047770
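
The checkpoints above are ordinary PyTorch files saved by Sample-Factory. A minimal sketch for inspecting one after download; the internal dictionary layout is Sample-Factory specific, so only the top-level keys are listed:

```
# Sketch: peek at a downloaded checkpoint's top-level structure.
# Recent PyTorch versions may require weights_only=False for full unpickling.
import torch

ckpt = torch.load(
    "checkpoint_p0/best_000033451_548061184_reward_1.146.pth",  # assumed local path
    map_location="cpu",
)
if isinstance(ckpt, dict):
    for key in ckpt:
        print(key)
```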
config.json
ADDED
@@ -0,0 +1,164 @@
{
"help": false,
"algo": "APPO",
"env": "GDY-MettaGrid",
"experiment": "p2.sf.1",
"train_dir": "./train_dir/sample_factory",
"restart_behavior": "resume",
"device": "gpu",
"seed": 0,
"num_policies": 1,
"async_rl": true,
"serial_mode": false,
"batched_sampling": false,
"num_batches_to_accumulate": 2,
"worker_num_splits": 2,
"policy_workers_per_policy": 1,
"max_policy_lag": 50,
"num_workers": 32,
"num_envs_per_worker": 2,
"batch_size": 16384,
"num_batches_per_epoch": 1,
"num_epochs": 1,
"rollout": 256,
"recurrence": 256,
"shuffle_minibatches": false,
"gamma": 0.99,
"reward_scale": 1.0,
"reward_clip": 1000.0,
"value_bootstrap": false,
"normalize_returns": true,
"exploration_loss_coeff": 0.002,
"value_loss_coeff": 0.976,
"kl_loss_coeff": 0.0,
"aux_loss_coeff": 0.0,
"exploration_loss": "symmetric_kl",
"gae_lambda": 0.95,
"ppo_clip_ratio": 0.1,
"ppo_clip_value": 1.0,
"with_vtrace": false,
"vtrace_rho": 1.0,
"vtrace_c": 1.0,
"optimizer": "adam",
"adam_eps": 1e-06,
"adam_beta1": 0.9,
"adam_beta2": 0.999,
"max_grad_norm": 4.0,
"learning_rate": 0.0001,
"lr_schedule": "constant",
"lr_schedule_kl_threshold": 0.008,
"lr_adaptive_min": 1e-06,
"lr_adaptive_max": 0.01,
"obs_subtract_mean": 0.0,
"obs_scale": 1.0,
"normalize_input": false,
"normalize_input_keys": null,
"decorrelate_experience_max_seconds": 150,
"decorrelate_envs_on_one_worker": true,
"actor_worker_gpus": [],
"set_workers_cpu_affinity": true,
"force_envs_single_thread": false,
"default_niceness": 0,
"log_to_file": true,
"experiment_summaries_interval": 10,
"flush_summaries_interval": 30,
"stats_avg": 100,
"summaries_use_frameskip": true,
"heartbeat_interval": 20,
"heartbeat_reporting_interval": 180,
"train_for_env_steps": 9999999999999,
"train_for_seconds": 10000000000,
"save_every_sec": 120,
"keep_checkpoints": 2,
"load_checkpoint_path": null,
"init_checkpoint_path": null,
"load_checkpoint_kind": "latest",
"save_milestones_sec": -1,
"save_best_every_sec": 5,
"save_best_metric": "reward",
"save_best_after": 100000,
"benchmark": false,
"encoder_mlp_layers": [
512,
512
],
"encoder_conv_architecture": "convnet_simple",
"encoder_conv_mlp_layers": [
512
],
"use_rnn": true,
"rnn_size": 512,
"rnn_type": "gru",
"rnn_num_layers": 1,
"decoder_mlp_layers": [],
"nonlinearity": "elu",
"policy_initialization": "orthogonal",
"policy_init_gain": 1.0,
"actor_critic_share_weights": true,
"adaptive_stddev": true,
"continuous_tanh_scale": 0.0,
"initial_stddev": 1.0,
"use_env_info_cache": false,
"env_gpu_actions": false,
"env_gpu_observations": true,
"env_frameskip": 1,
"env_framestack": 1,
"pixel_format": "CHW",
"use_record_episode_statistics": false,
"episode_counter": false,
"with_wandb": true,
"wandb_user": "platypus",
"wandb_project": "metta",
"wandb_group": "platypus",
"wandb_job_type": "SF",
"wandb_tags": [],
"with_pbt": false,
"pbt_mix_policies_in_one_env": true,
"pbt_period_env_steps": 5000000,
"pbt_start_mutation": 20000000,
"pbt_replace_fraction": 0.3,
"pbt_mutation_rate": 0.15,
"pbt_replace_reward_gap": 0.1,
"pbt_replace_reward_gap_absolute": 1e-06,
"pbt_optimize_gamma": false,
"pbt_target_objective": "true_objective",
"pbt_perturb_min": 1.1,
"pbt_perturb_max": 1.5,
"env_cfg": "{\"name\": \"GDY-MettaGrid\", \"_target_\": \"env.griddly.mettagrid.gym_env.MettaGridGymEnv\", \"max_action_value\": 10, \"hidden_features\": {\"grid_obs\": [\"agent:energy\", \"agent:hp\"]}, \"game\": {\"obs_width\": 11, \"obs_height\": 11, \"max_steps\": 5000, \"tile_size\": 16, \"num_agents\": 20, \"no_energy_steps\": 500, \"objects\": {\"agent\": {\"initial_energy\": 250, \"max_energy\": 250, \"max_inventory\": 5, \"freeze_duration\": 10, \"energy_reward\": false, \"hp\": 1, \"mortal\": false, \"upkeep\": {\"time\": 0, \"shield\": 1}, \"use_cost\": 0}, \"altar\": {\"hp\": 30, \"cooldown\": 2, \"use_cost\": 100}, \"converter\": {\"hp\": 30, \"cooldown\": 2, \"energy_output\": {\"r1\": 50, \"r2\": 10, \"r3\": 1}}, \"generator\": {\"hp\": 30, \"cooldown\": 5, \"initial_resources\": 30, \"use_cost\": 0}, \"wall\": {\"density\": 0.01, \"hp\": 10}}, \"actions\": {\"move\": {\"cost\": 0}, \"rotate\": {\"cost\": 0}, \"jump\": {\"cost\": 3}, \"shield\": {\"cost\": 0}, \"transfer\": {\"cost\": 0}, \"use\": {\"cost\": 0}, \"attack\": {\"cost\": 5, \"damage\": 5}}, \"map\": {\"layout\": {\"rooms_x\": 1, \"rooms_y\": 1, \"rooms\": [[\"base\", \"wild_1\", \"base\"], [\"wild_2\", \"center\", \"wild_2\"], [\"base\", \"wild_1\", \"base\"]]}, \"room\": {\"width\": 25, \"height\": 25, \"num_agents\": 5, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 15, \"wall\": 10}}, \"wild_1\": {\"width\": 10, \"height\": 15, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 1, \"converter\": 1, \"generator\": 5, \"wall\": 5}}, \"wild_2\": {\"width\": 15, \"height\": 10, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 1, \"converter\": 1, \"generator\": 5, \"wall\": 5}}, \"center\": {\"width\": 10, \"height\": 10, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 2, \"converter\": 5, \"generator\": 10, \"wall\": 20}}, \"base\": {\"width\": 15, \"height\": 15, \"border\": 1, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 5, \"wall\": 5}}}}, \"kinship\": {\"team_size\": 1, \"team_reward\": 0}}",
"agent_cfg": "{\"_target_\": \"agent.metta_agent.MettaAgent\", \"observation_encoders\": {\"grid_obs\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 512, \"layers\": 4}, \"global_vars\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_action\": {\"feature_names\": [\"last_action_id\", \"last_action_val\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_reward\": {\"feature_names\": [\"last_reward\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"kinship\": {\"feature_names\": [\"kinship\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}}, \"fc\": {\"layers\": 3, \"output_dim\": 512}, \"decoder\": {\"_target_\": \"agent.decoder.Decoder\"}, \"core\": {\"rnn_type\": \"gru\", \"rnn_num_layers\": 1, \"rnn_size\": 512}}",
"command_line": "--experiment=p2.sf.1 --load_checkpoint_kind=latest --train_dir=./train_dir/sample_factory --nonlinearity=elu --normalize_input=False --seed=0 --recurrence=256 --rollout=256 --aux_loss_coeff=0 --value_loss_coeff=0.976 --exploration_loss=symmetric_kl --exploration_loss_coeff=0.002 --policy_initialization=orthogonal --learning_rate=0.0001 --max_policy_lag=50 --device=gpu --batch_size=16384 --decorrelate_experience_max_seconds=150 --train_for_env_steps=9999999999999 --rnn_type=gru --rnn_num_layers=1 --rnn_size=512 --with_wandb=True --wandb_user=platypus --wandb_project=metta --wandb_group=platypus --env=GDY-MettaGrid --env_cfg={\"name\": \"GDY-MettaGrid\", \"_target_\": \"env.griddly.mettagrid.gym_env.MettaGridGymEnv\", \"max_action_value\": 10, \"hidden_features\": {\"grid_obs\": [\"agent:energy\", \"agent:hp\"]}, \"game\": {\"obs_width\": 11, \"obs_height\": 11, \"max_steps\": 5000, \"tile_size\": 16, \"num_agents\": 20, \"no_energy_steps\": 500, \"objects\": {\"agent\": {\"initial_energy\": 250, \"max_energy\": 250, \"max_inventory\": 5, \"freeze_duration\": 10, \"energy_reward\": false, \"hp\": 1, \"mortal\": false, \"upkeep\": {\"time\": 0, \"shield\": 1}, \"use_cost\": 0}, \"altar\": {\"hp\": 30, \"cooldown\": 2, \"use_cost\": 100}, \"converter\": {\"hp\": 30, \"cooldown\": 2, \"energy_output\": {\"r1\": 50, \"r2\": 10, \"r3\": 1}}, \"generator\": {\"hp\": 30, \"cooldown\": 5, \"initial_resources\": 30, \"use_cost\": 0}, \"wall\": {\"density\": 0.01, \"hp\": 10}}, \"actions\": {\"move\": {\"cost\": 0}, \"rotate\": {\"cost\": 0}, \"jump\": {\"cost\": 3}, \"shield\": {\"cost\": 0}, \"transfer\": {\"cost\": 0}, \"use\": {\"cost\": 0}, \"attack\": {\"cost\": 5, \"damage\": 5}}, \"map\": {\"layout\": {\"rooms_x\": 1, \"rooms_y\": 1, \"rooms\": [[\"base\", \"wild_1\", \"base\"], [\"wild_2\", \"center\", \"wild_2\"], [\"base\", \"wild_1\", \"base\"]]}, \"room\": {\"width\": 25, \"height\": 25, \"num_agents\": 5, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 15, \"wall\": 10}}, \"wild_1\": {\"width\": 10, \"height\": 15, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 1, \"converter\": 1, \"generator\": 5, \"wall\": 5}}, \"wild_2\": {\"width\": 15, \"height\": 10, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 1, \"converter\": 1, \"generator\": 5, \"wall\": 5}}, \"center\": {\"width\": 10, \"height\": 10, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 2, \"converter\": 5, \"generator\": 10, \"wall\": 20}}, \"base\": {\"width\": 15, \"height\": 15, \"border\": 1, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 5, \"wall\": 5}}}}, \"kinship\": {\"team_size\": 1, \"team_reward\": 0}} --agent_cfg={\"_target_\": \"agent.metta_agent.MettaAgent\", \"observation_encoders\": {\"grid_obs\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 512, \"layers\": 4}, \"global_vars\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_action\": {\"feature_names\": [\"last_action_id\", \"last_action_val\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_reward\": {\"feature_names\": [\"last_reward\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"kinship\": {\"feature_names\": [\"kinship\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}}, \"fc\": {\"layers\": 3, \"output_dim\": 512}, \"decoder\": 
{\"_target_\": \"agent.decoder.Decoder\"}, \"core\": {\"rnn_type\": \"gru\", \"rnn_num_layers\": 1, \"rnn_size\": 512}}",
"cli_args": {
"env": "GDY-MettaGrid",
"experiment": "p2.sf.1",
"train_dir": "./train_dir/sample_factory",
"device": "gpu",
"seed": 0,
"max_policy_lag": 50,
"batch_size": 16384,
"rollout": 256,
"recurrence": 256,
"exploration_loss_coeff": 0.002,
"value_loss_coeff": 0.976,
"aux_loss_coeff": 0.0,
"exploration_loss": "symmetric_kl",
"learning_rate": 0.0001,
"normalize_input": false,
"decorrelate_experience_max_seconds": 150,
"train_for_env_steps": 9999999999999,
"load_checkpoint_kind": "latest",
"rnn_size": 512,
"rnn_type": "gru",
"rnn_num_layers": 1,
"nonlinearity": "elu",
"policy_initialization": "orthogonal",
"with_wandb": true,
"wandb_user": "platypus",
"wandb_project": "metta",
"wandb_group": "platypus",
"env_cfg": "{\"name\": \"GDY-MettaGrid\", \"_target_\": \"env.griddly.mettagrid.gym_env.MettaGridGymEnv\", \"max_action_value\": 10, \"hidden_features\": {\"grid_obs\": [\"agent:energy\", \"agent:hp\"]}, \"game\": {\"obs_width\": 11, \"obs_height\": 11, \"max_steps\": 5000, \"tile_size\": 16, \"num_agents\": 20, \"no_energy_steps\": 500, \"objects\": {\"agent\": {\"initial_energy\": 250, \"max_energy\": 250, \"max_inventory\": 5, \"freeze_duration\": 10, \"energy_reward\": false, \"hp\": 1, \"mortal\": false, \"upkeep\": {\"time\": 0, \"shield\": 1}, \"use_cost\": 0}, \"altar\": {\"hp\": 30, \"cooldown\": 2, \"use_cost\": 100}, \"converter\": {\"hp\": 30, \"cooldown\": 2, \"energy_output\": {\"r1\": 50, \"r2\": 10, \"r3\": 1}}, \"generator\": {\"hp\": 30, \"cooldown\": 5, \"initial_resources\": 30, \"use_cost\": 0}, \"wall\": {\"density\": 0.01, \"hp\": 10}}, \"actions\": {\"move\": {\"cost\": 0}, \"rotate\": {\"cost\": 0}, \"jump\": {\"cost\": 3}, \"shield\": {\"cost\": 0}, \"transfer\": {\"cost\": 0}, \"use\": {\"cost\": 0}, \"attack\": {\"cost\": 5, \"damage\": 5}}, \"map\": {\"layout\": {\"rooms_x\": 1, \"rooms_y\": 1, \"rooms\": [[\"base\", \"wild_1\", \"base\"], [\"wild_2\", \"center\", \"wild_2\"], [\"base\", \"wild_1\", \"base\"]]}, \"room\": {\"width\": 25, \"height\": 25, \"num_agents\": 5, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 15, \"wall\": 10}}, \"wild_1\": {\"width\": 10, \"height\": 15, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 1, \"converter\": 1, \"generator\": 5, \"wall\": 5}}, \"wild_2\": {\"width\": 15, \"height\": 10, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 1, \"converter\": 1, \"generator\": 5, \"wall\": 5}}, \"center\": {\"width\": 10, \"height\": 10, \"border\": 0, \"objects\": {\"agent\": 0, \"altar\": 2, \"converter\": 5, \"generator\": 10, \"wall\": 20}}, \"base\": {\"width\": 15, \"height\": 15, \"border\": 1, \"objects\": {\"agent\": 5, \"altar\": 1, \"converter\": 3, \"generator\": 5, \"wall\": 5}}}}, \"kinship\": {\"team_size\": 1, \"team_reward\": 0}}",
"agent_cfg": "{\"_target_\": \"agent.metta_agent.MettaAgent\", \"observation_encoders\": {\"grid_obs\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 512, \"layers\": 4}, \"global_vars\": {\"feature_names\": [], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_action\": {\"feature_names\": [\"last_action_id\", \"last_action_val\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"last_reward\": {\"feature_names\": [\"last_reward\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}, \"kinship\": {\"feature_names\": [\"kinship\"], \"normalize_features\": true, \"label_dim\": 4, \"output_dim\": 8, \"layers\": 2}}, \"fc\": {\"layers\": 3, \"output_dim\": 512}, \"decoder\": {\"_target_\": \"agent.decoder.Decoder\"}, \"core\": {\"rnn_type\": \"gru\", \"rnn_num_layers\": 1, \"rnn_size\": 512}}"
},
"git_hash": "5852cedc0ed12afc552a74ebab07765f22ed9f7f",
"git_repo_name": "https://github.com/daveey/metta.git",
"wandb_unique_id": "p2.sf.1_20240702_110433_093421"
}
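
Note that `env_cfg`, `agent_cfg`, and their copies under `cli_args` are themselves JSON documents stored as strings, so they need a second decoding pass. A small sketch, assuming `config.json` has been downloaded locally:

```
# Sketch: decode the nested JSON strings inside config.json.
import json

with open("config.json") as f:
    cfg = json.load(f)

env_cfg = json.loads(cfg["env_cfg"])
agent_cfg = json.loads(cfg["agent_cfg"])

print(env_cfg["game"]["num_agents"])   # 20
print(env_cfg["game"]["obs_width"])    # 11
print(agent_cfg["core"]["rnn_size"])   # 512
```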
git.diff
ADDED
The diff for this file is too large to render.
replay.gif
ADDED
replay.mp4
ADDED
Binary file (448 kB)
sf_log.txt
ADDED
The diff for this file is too large to render.