438k boxoban levels
This view is limited to 50 files because the commit contains too many changes; see the raw diff for the full change set.
- boxoban_data.h5 +2 -2
- logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-78400/config.json +39 -0
- logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-78400/global_step.txt +1 -0
- logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-78400/optimizer.pt +3 -0
- logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-78400/pytorch_model.bin +3 -0
- logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/config.json +1 -0
- logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/events.out.tfevents.1674499600.learnfair0289.3060598.0 +3 -0
- logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-76160/config.json +40 -0
- logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-76160/global_step.txt +1 -0
- logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-76160/optimizer.pt +3 -0
- logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-76160/pytorch_model.bin +3 -0
- logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/config.json +1 -0
- logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/events.out.tfevents.1674499600.learnfair0410.1063561.0 +3 -0
- logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/config.json +39 -0
- logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/global_step.txt +1 -0
- logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/optimizer.pt +3 -0
- logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/pytorch_model.bin +3 -0
- logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/config.json +1 -0
- logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/events.out.tfevents.1674499600.learnfair0289.3060598.0 +3 -0
- logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/events.out.tfevents.1674595502.devfair0748.3640277.0 +3 -0
- logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/config.json +40 -0
- logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/global_step.txt +1 -0
- logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/optimizer.pt +3 -0
- logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/pytorch_model.bin +3 -0
- logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/config.json +1 -0
- logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/events.out.tfevents.1674499600.learnfair0410.1063561.0 +3 -0
- logs/sokoban/model:codeparrot/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/config.json +1 -0
- logs/sokoban/model:codeparrot/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/events.out.tfevents.1674871152.devfair0748.475905.0 +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/checkpoint-1000/config.json +39 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/checkpoint-1000/global_step.txt +1 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/checkpoint-1000/optimizer.pt +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/checkpoint-1000/pytorch_model.bin +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/config.json +1 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/events.out.tfevents.1674873092.learnfair0316.111781.0 +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/checkpoint-1000/config.json +39 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/checkpoint-1000/global_step.txt +1 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/checkpoint-1000/optimizer.pt +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/checkpoint-1000/pytorch_model.bin +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/config.json +1 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/events.out.tfevents.1674873108.learnfair0431.3905918.0 +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/checkpoint-1000/config.json +39 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/checkpoint-1000/global_step.txt +1 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/checkpoint-1000/optimizer.pt +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/checkpoint-1000/pytorch_model.bin +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/config.json +1 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/events.out.tfevents.1674873101.learnfair0451.3484952.0 +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/checkpoint-2000/config.json +39 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/checkpoint-2000/global_step.txt +1 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/checkpoint-2000/optimizer.pt +3 -0
- logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/checkpoint-2000/pytorch_model.bin +3 -0
boxoban_data.h5
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:2c2f3a6add7f182b6b39a27c34c3015ef83a21cc64871d5bcee8bd6a521cd7fa
+size 3288195744
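The pointer above shows that `boxoban_data.h5` is tracked with Git LFS: the repository stores only the three-line version/oid/size stub, while the ~3.3 GB payload lives in LFS storage. Below is a minimal sketch of materializing and inspecting the file, assuming a standard git-lfs setup; the HDF5 file's internal layout is not visible in this diff, so the script simply walks whatever structure it finds.

```python
import subprocess

import h5py

# Fetch the real payload behind the LFS pointer (requires git-lfs installed).
subprocess.run(
    ["git", "lfs", "pull", "--include", "boxoban_data.h5"],
    check=True,
)

# Walk the HDF5 file to discover its groups/datasets -- how the 438k boxoban
# levels are organized is not shown in the diff, so nothing is assumed here.
with h5py.File("boxoban_data.h5", "r") as f:
    f.visititems(lambda name, obj: print(name, obj))
```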
logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-78400/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": true,
+  "vocab_size": 50259
+}
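Each checkpoint directory above holds a standard Hugging Face config.json alongside the weight file, so it can be loaded directly with transformers. A sketch, with the path taken from this diff; note that vocab_size is 50259 rather than stock GPT-2's 50257, suggesting the run added two special tokens, so the run's own tokenizer is needed for meaningful decoding.

```python
from transformers import GPT2LMHeadModel

# Checkpoint path as committed in this diff (the "l_maze copy" variant holds
# identical weights, per the matching LFS oids).
ckpt = "logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400"

# from_pretrained reads config.json and pytorch_model.bin from the directory.
model = GPT2LMHeadModel.from_pretrained(ckpt).eval()
print(model.config.vocab_size)  # 50259: GPT-2's 50257 plus two added tokens
```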
logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-78400/global_step.txt
ADDED
@@ -0,0 +1 @@
+78400
logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-78400/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d62ac04f24947a0676763bd1937ea926bc0c1de8c574bc026307912f405df56
+size 995653957
logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-78400/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c47508bdfe7c03e38eeb4f2ba94089736b09fb7e0f624c0b8075eaff08b25b80
+size 510404157
logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/config.json
ADDED
@@ -0,0 +1 @@
+{"game": "l_maze", "data_source": "l_maze", "chunk_size": 128, "model": "gpt2", "warmup_proportion": 0.0002, "weight_decay": 0.01, "max_grad_norm": 1, "learning_rate": 0.0001, "exp_name": "", "overwrite": false, "seed": 1, "batch_size": 16, "epochs": 40, "save_freq": 1000, "eval_freq": 1000, "no_log": false, "num_eval_samples": 20, "gen_freq": 500, "gen_len": 128, "gen_temp": 1.0, "gen_beams": 5, "gen_top_k": 50, "gen_top_p": 1.0, "gen_typical_p": 1.0}
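The run-level config.json records both training hyperparameters (lr 1e-4, batch size 16, chunk size 128) and the gen_* sampling settings. A hedged guess at how those gen_* fields would map onto transformers' generate() API; the repo's actual sampling code is not part of this diff.

```python
import json

from transformers import GPT2LMHeadModel, GPT2TokenizerFast

run = "logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1"
cfg = json.load(open(f"{run}/config.json"))

# Base gpt2 stands in here; the committed checkpoint expects the run's own
# tokenizer (vocab_size 50259), which is not included in this diff.
tokenizer = GPT2TokenizerFast.from_pretrained("gpt2")
model = GPT2LMHeadModel.from_pretrained("gpt2")

# Start generation from the BOS token and apply the config's sampling knobs.
inputs = tokenizer(tokenizer.bos_token, return_tensors="pt")
out = model.generate(
    inputs.input_ids,
    max_new_tokens=cfg["gen_len"],   # 128
    do_sample=True,
    temperature=cfg["gen_temp"],     # 1.0
    top_k=cfg["gen_top_k"],          # 50
    top_p=cfg["gen_top_p"],          # 1.0
    typical_p=cfg["gen_typical_p"],  # 1.0
    pad_token_id=tokenizer.eos_token_id,
)
print(tokenizer.decode(out[0]))
# cfg["gen_beams"] (5) would correspond to num_beams if beam search is used.
```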
logs/l_maze copy/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/events.out.tfevents.1674499600.learnfair0289.3060598.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53461354b466fd25f6c9f5ecb4300dd2b15a4e8cd8ad9ee1f354778cc74b4a8b
+size 3983113
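The events.out.tfevents.* files are TensorBoard logs, also committed as LFS pointers. Once pulled, they can be read back without a TensorBoard UI via the EventAccumulator; the scalar tag names are not visible in this diff, so the sketch discovers them at runtime.

```python
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

run_dir = "logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1"
acc = EventAccumulator(run_dir)
acc.Reload()  # parse every events.out.tfevents.* file in the directory

tags = acc.Tags()["scalars"]
print(tags)  # whatever scalar series the training loop actually logged
for event in acc.Scalars(tags[0]):
    print(event.step, event.value)
```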
logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-76160/config.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "_name_or_path": "microsoft/CodeGPT-small-java-adaptedGPT2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": true,
+  "vocab_size": 50263
+}
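This java-gpt2 checkpoint config is nearly identical to the gpt2 one above; the visible differences are the base model name (microsoft/CodeGPT-small-java-adaptedGPT2), an extra gradient_checkpointing flag, and a larger vocab_size (50263 vs. 50259). A small helper to diff any two such configs mechanically:

```python
import json

def diff_configs(path_a: str, path_b: str) -> None:
    """Print every key whose value differs between two JSON config files."""
    a, b = (json.load(open(p)) for p in (path_a, path_b))
    for key in sorted(set(a) | set(b)):
        if a.get(key) != b.get(key):
            print(f"{key}: {a.get(key)!r} -> {b.get(key)!r}")

diff_configs(
    "logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/config.json",
    "logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/config.json",
)
```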
logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-76160/global_step.txt
ADDED
@@ -0,0 +1 @@
+76160
logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-76160/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b4a4aa9a820a7ddd913240cdfb0c75199d5d35628722e3b60586dd623eb6582
+size 995678533
logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/checkpoint-76160/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b08dbc9363ef0f918442a9a62a61dafc026809ab18e44d0d05c22be3002ccc0
+size 510416445
logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/config.json
ADDED
@@ -0,0 +1 @@
+{"game": "l_maze", "data_source": "l_maze", "chunk_size": 128, "model": "java-gpt2", "warmup_proportion": 0.0002, "weight_decay": 0.01, "max_grad_norm": 1, "learning_rate": 0.0001, "exp_name": "", "overwrite": false, "seed": 1, "batch_size": 16, "epochs": 40, "save_freq": 1000, "eval_freq": 1000, "no_log": false, "num_eval_samples": 20, "gen_freq": 500, "gen_len": 128, "gen_temp": 1.0, "gen_beams": 5, "gen_top_k": 50, "gen_top_p": 1.0, "gen_typical_p": 1.0}
logs/l_maze copy/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1 copy/events.out.tfevents.1674499600.learnfair0410.1063561.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1a8b52a0a069dc516bb7aa26c87e7bc9b379f3ce0d3561048b44336d870b6d3
+size 3869180
logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": true,
+  "vocab_size": 50259
+}
logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/global_step.txt
ADDED
@@ -0,0 +1 @@
+78400
logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d62ac04f24947a0676763bd1937ea926bc0c1de8c574bc026307912f405df56
+size 995653957
logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-78400/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c47508bdfe7c03e38eeb4f2ba94089736b09fb7e0f624c0b8075eaff08b25b80
+size 510404157
logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/config.json
ADDED
@@ -0,0 +1 @@
+{"game": "l_maze", "data_source": "l_maze", "chunk_size": 128, "model": "gpt2", "warmup_proportion": 0.0002, "weight_decay": 0.01, "max_grad_norm": 1, "learning_rate": 0.0001, "exp_name": "", "overwrite": false, "seed": 1, "batch_size": 16, "epochs": 20, "save_freq": 1000, "eval_freq": 1000, "no_log": false, "num_eval_samples": 20, "gen_freq": 500, "gen_len": 128, "gen_temp": 1.0, "gen_beams": 5, "gen_top_k": 50, "gen_top_p": 1.0, "gen_typical_p": 1.0}
logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/events.out.tfevents.1674499600.learnfair0289.3060598.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:53461354b466fd25f6c9f5ecb4300dd2b15a4e8cd8ad9ee1f354778cc74b4a8b
+size 3983113
logs/l_maze/gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/events.out.tfevents.1674595502.devfair0748.3640277.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fd8b5bc1662f04e442d54f48cfc18ff5259e8631820bc5beea7ddc0987dc10a8
+size 7165
logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/config.json
ADDED
@@ -0,0 +1,40 @@
+{
+  "_name_or_path": "microsoft/CodeGPT-small-java-adaptedGPT2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "gradient_checkpointing": false,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": true,
+  "vocab_size": 50263
+}
logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/global_step.txt
ADDED
@@ -0,0 +1 @@
+76160
logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1b4a4aa9a820a7ddd913240cdfb0c75199d5d35628722e3b60586dd623eb6582
+size 995678533
logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/checkpoint-76160/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:0b08dbc9363ef0f918442a9a62a61dafc026809ab18e44d0d05c22be3002ccc0
+size 510416445
logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/config.json
ADDED
@@ -0,0 +1 @@
+{"game": "l_maze", "data_source": "l_maze", "chunk_size": 128, "model": "java-gpt2", "warmup_proportion": 0.0002, "weight_decay": 0.01, "max_grad_norm": 1, "learning_rate": 0.0001, "exp_name": "", "overwrite": false, "seed": 1, "batch_size": 16, "epochs": 40, "save_freq": 1000, "eval_freq": 1000, "no_log": false, "num_eval_samples": 20, "gen_freq": 500, "gen_len": 128, "gen_temp": 1.0, "gen_beams": 5, "gen_top_k": 50, "gen_top_p": 1.0, "gen_typical_p": 1.0}
logs/l_maze/java-gpt2/l_maze/chunk_size-128_lr-0.0001/seed-1/events.out.tfevents.1674499600.learnfair0410.1063561.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f1a8b52a0a069dc516bb7aa26c87e7bc9b379f3ce0d3561048b44336d870b6d3
+size 3869180
logs/sokoban/model:codeparrot/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/config.json
ADDED
@@ -0,0 +1 @@
+{"game": "sokoban", "level_key": "level", "annotation_keys": ["solution_len"], "num_annotation_buckets": null, "holdout_solution_lens": null, "chunk_size": 128, "model": "codeparrot", "warmup_proportion": 0.0002, "weight_decay": 0.01, "max_grad_norm": 1, "learning_rate": 0.0001, "exp_name": "", "overwrite": true, "seed": 42, "batch_size": 16, "epochs": 20, "save_freq": 1000, "eval_freq": 1000, "no_log": false, "num_eval_samples": 20, "gen_freq": 500, "gen_len": 128, "gen_temp": 1.0, "gen_beams": 5, "gen_top_k": 50, "gen_top_p": 1.0, "gen_typical_p": 1.0, "sample_contexts": false}
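Unlike the l_maze runs, the sokoban configs add conditioning fields: level_key, annotation_keys (here ["solution_len"]), optional annotation bucketing, and holdout_solution_lens. How an annotation is serialized into training text is not shown anywhere in this diff, so the following is purely illustrative of the idea of prefixing a level with its annotation (helper name, prompt format, and the tiny example level are all hypothetical):

```python
def annotate_level(level: str, solution_len: int) -> str:
    # Prepend the annotation so the model can condition on it at sample time.
    return f"solution_len: {solution_len}\n{level}"

# Standard Sokoban ASCII: '#' wall, '@' player, '$' box, '.' goal.
level = (
    "##########\n"
    "#  @ $ . #\n"
    "##########"
)
print(annotate_level(level, solution_len=3))  # one move plus two pushes
```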
logs/sokoban/model:codeparrot/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/events.out.tfevents.1674871152.devfair0748.475905.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:33bc448370b7a4680ef47e9e6b79955ff07d10444e7f542ebd3d3b6691f6580d
+size 88
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/checkpoint-1000/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": true,
+  "vocab_size": 50259
+}
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/checkpoint-1000/global_step.txt
ADDED
@@ -0,0 +1 @@
+1000
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/checkpoint-1000/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:87d510c448635538a0843ab08dc6377517c76b397e8747e83d466ac232550301
+size 995653957
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/checkpoint-1000/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc124a986a9ba15f261978e1d7f7878fe726c273f12845858c5c694d8d1481ca
+size 510404157
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/config.json
ADDED
@@ -0,0 +1 @@
+{"game": "sokoban", "level_key": "level", "annotation_keys": ["solution_len"], "num_annotation_buckets": null, "holdout_solution_lens": null, "chunk_size": 128, "model": "gpt2", "warmup_proportion": 0.0002, "weight_decay": 0.01, "max_grad_norm": 1, "learning_rate": 0.0001, "exp_name": "", "overwrite": false, "seed": 0, "batch_size": 16, "epochs": 20, "save_freq": 1000, "eval_freq": 1000, "no_log": false, "num_eval_samples": 20, "gen_freq": 500, "gen_len": 128, "gen_temp": 1.0, "gen_beams": 5, "gen_top_k": 50, "gen_top_p": 1.0, "gen_typical_p": 1.0, "sample_contexts": false}
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-0/events.out.tfevents.1674873092.learnfair0316.111781.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86367ac59d66b13e054e69aa375b3a8ada73ec6e725fe17c2e9ce369f07e44a4
+size 49424
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/checkpoint-1000/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": true,
+  "vocab_size": 50259
+}
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/checkpoint-1000/global_step.txt
ADDED
@@ -0,0 +1 @@
+1000
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/checkpoint-1000/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:668e8ef6aef46cf04f2bead38ee89fdd9786f4f024727ce36faa6b6e44c8e3ed
+size 995653957
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/checkpoint-1000/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:be0c2480d646c7fabc94d5e1c4c1caee523af08e29a2c0ab6d816e225805705b
+size 510404157
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/config.json
ADDED
@@ -0,0 +1 @@
+{"game": "sokoban", "level_key": "level", "annotation_keys": ["solution_len"], "num_annotation_buckets": null, "holdout_solution_lens": null, "chunk_size": 128, "model": "gpt2", "warmup_proportion": 0.0002, "weight_decay": 0.01, "max_grad_norm": 1, "learning_rate": 0.0001, "exp_name": "", "overwrite": false, "seed": 1, "batch_size": 16, "epochs": 20, "save_freq": 1000, "eval_freq": 1000, "no_log": false, "num_eval_samples": 20, "gen_freq": 500, "gen_len": 128, "gen_temp": 1.0, "gen_beams": 5, "gen_top_k": 50, "gen_top_p": 1.0, "gen_typical_p": 1.0, "sample_contexts": false}
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-1/events.out.tfevents.1674873108.learnfair0431.3905918.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:afa604c41d27d75ff45a5e8fef2cf2339ddb6f29ad1127b09c02eda02e2017ca
+size 49380
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/checkpoint-1000/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": true,
+  "vocab_size": 50259
+}
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/checkpoint-1000/global_step.txt
ADDED
@@ -0,0 +1 @@
+1000
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/checkpoint-1000/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9871a175a7701be8a0237f4cba514aad13d664007361dd4753cd1e3293422545
+size 995653957
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/checkpoint-1000/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9312d596b21b3c2a9073e4763ab7b60c961fdc31925d88458c96f2fba7a544ae
+size 510404157
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/config.json
ADDED
@@ -0,0 +1 @@
+{"game": "sokoban", "level_key": "level", "annotation_keys": ["solution_len"], "num_annotation_buckets": null, "holdout_solution_lens": null, "chunk_size": 128, "model": "gpt2", "warmup_proportion": 0.0002, "weight_decay": 0.01, "max_grad_norm": 1, "learning_rate": 0.0001, "exp_name": "", "overwrite": false, "seed": 2, "batch_size": 16, "epochs": 20, "save_freq": 1000, "eval_freq": 1000, "no_log": false, "num_eval_samples": 20, "gen_freq": 500, "gen_len": 128, "gen_temp": 1.0, "gen_beams": 5, "gen_top_k": 50, "gen_top_p": 1.0, "gen_typical_p": 1.0, "sample_contexts": false}
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-2/events.out.tfevents.1674873101.learnfair0451.3484952.0
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b8f4c0c9c652af535ed348f0fdfd2f82a8f8017a0342a7a51e45840b33000fa
+size 49435
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/checkpoint-2000/config.json
ADDED
@@ -0,0 +1,39 @@
+{
+  "_name_or_path": "gpt2",
+  "activation_function": "gelu_new",
+  "architectures": [
+    "GPT2LMHeadModel"
+  ],
+  "attn_pdrop": 0.1,
+  "bos_token_id": 50256,
+  "embd_pdrop": 0.1,
+  "eos_token_id": 50256,
+  "initializer_range": 0.02,
+  "layer_norm_epsilon": 1e-05,
+  "model_type": "gpt2",
+  "n_ctx": 1024,
+  "n_embd": 768,
+  "n_head": 12,
+  "n_inner": null,
+  "n_layer": 12,
+  "n_positions": 1024,
+  "reorder_and_upcast_attn": false,
+  "resid_pdrop": 0.1,
+  "scale_attn_by_inverse_layer_idx": false,
+  "scale_attn_weights": true,
+  "summary_activation": null,
+  "summary_first_dropout": 0.1,
+  "summary_proj_to_labels": true,
+  "summary_type": "cls_index",
+  "summary_use_proj": true,
+  "task_specific_params": {
+    "text-generation": {
+      "do_sample": true,
+      "max_length": 50
+    }
+  },
+  "torch_dtype": "float32",
+  "transformers_version": "4.25.1",
+  "use_cache": true,
+  "vocab_size": 50259
+}
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/checkpoint-2000/global_step.txt
ADDED
@@ -0,0 +1 @@
+2000
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/checkpoint-2000/optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3d1dc19476afe61d0395b3820557156216e1491a2f9f9bb8c6568d86b3cf9d1d
+size 995653957
logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/seed-42/checkpoint-2000/pytorch_model.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:408ff64cae8f9defe8fd7e1af16fd7878d059b7871e54d1ad77fa4707ddbf96e
+size 510404157
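Taken together, each checkpoint directory is self-contained for resuming: config.json and pytorch_model.bin rebuild the model, optimizer.pt (~1 GB, roughly twice the weight file, consistent with Adam-style first and second moments) restores the optimizer, and global_step.txt records where training stopped. A sketch of a resume, assuming optimizer.pt holds a plain state_dict and reusing the lr and weight_decay recorded in the run configs; the optimizer class itself is an assumption, since the diff does not name it.

```python
from pathlib import Path

import torch
from transformers import GPT2LMHeadModel

ckpt = Path(
    "logs/sokoban/model:gpt2/level_key:level/annotation_keys:['solution_len']/"
    "num_annotation_buckets:None/holdouts:None/chunk_size-128_lr-0.0001/"
    "seed-42/checkpoint-2000"
)

model = GPT2LMHeadModel.from_pretrained(ckpt)

# AdamW is an assumption -- the diff only records lr and weight_decay, not the
# optimizer class; optimizer.pt is likewise assumed to be a raw state_dict.
optimizer = torch.optim.AdamW(model.parameters(), lr=1e-4, weight_decay=0.01)
optimizer.load_state_dict(torch.load(ckpt / "optimizer.pt", map_location="cpu"))

global_step = int((ckpt / "global_step.txt").read_text())  # 2000
```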