marksverdhei committed
Commit: b37ef15
Parent: be92179
config.json ADDED
@@ -0,0 +1,40 @@
+ {
+ "_name_or_path": "gpt2",
+ "activation_function": "gelu_new",
+ "architectures": [
+ "GPT2LMHeadModel"
+ ],
+ "attn_pdrop": 0.1,
+ "bos_token_id": 50256,
+ "embd_pdrop": 0.1,
+ "eos_token_id": 50256,
+ "initializer_range": 0.02,
+ "layer_norm_epsilon": 1e-05,
+ "max_length": 512,
+ "model_type": "gpt2",
+ "n_ctx": 1024,
+ "n_embd": 768,
+ "n_head": 12,
+ "n_inner": null,
+ "n_layer": 12,
+ "n_positions": 1024,
+ "reorder_and_upcast_attn": false,
+ "resid_pdrop": 0.1,
+ "scale_attn_by_inverse_layer_idx": false,
+ "scale_attn_weights": true,
+ "summary_activation": null,
+ "summary_first_dropout": 0.1,
+ "summary_proj_to_labels": true,
+ "summary_type": "cls_index",
+ "summary_use_proj": true,
+ "task_specific_params": {
+ "text-generation": {
+ "do_sample": true,
+ "max_length": 50
+ }
+ },
+ "torch_dtype": "float32",
+ "transformers_version": "4.31.0",
+ "use_cache": true,
+ "vocab_size": 50257
+ }
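
This is a standard `transformers` GPT-2 base config (12 layers, 12 heads, 768-dim embeddings, ~124M parameters). A minimal sketch of rebuilding the architecture from this file alone; the local directory name is hypothetical, any folder containing this config.json works:

```python
from transformers import GPT2Config, GPT2LMHeadModel

# Hypothetical local path holding the files from this commit.
config = GPT2Config.from_pretrained("./checkpoint-1636")

# Builds the 12-layer, 768-dim GPT-2 architecture with randomly
# initialized weights; pytorch_model.bin (below) holds the trained ones.
model = GPT2LMHeadModel(config)
print(model.num_parameters())  # ~124M for this configuration
```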
generation_config.json ADDED
@@ -0,0 +1,7 @@
+ {
+ "_from_model_config": true,
+ "bos_token_id": 50256,
+ "eos_token_id": 50256,
+ "max_length": 512,
+ "transformers_version": "4.31.0"
+ }
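
The generation defaults mirror the model config (`max_length` 512, shared BOS/EOS token 50256). A hedged sketch of how these defaults get picked up at generation time, assuming the checkpoint directory is available locally; the commit does not include a tokenizer, so the base `gpt2` one is used here as an assumption:

```python
from transformers import AutoModelForCausalLM, AutoTokenizer

# Hypothetical local path holding the files from this commit.
path = "./checkpoint-1636"
tokenizer = AutoTokenizer.from_pretrained("gpt2")  # base tokenizer, not part of this commit
model = AutoModelForCausalLM.from_pretrained(path)

inputs = tokenizer("What is", return_tensors="pt")
# generate() reads bos/eos ids and max_length from generation_config.json
# unless they are overridden here.
outputs = model.generate(**inputs, do_sample=True, max_length=50)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```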
optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8df89e55cf6868d401ad20ca71949411c591660f28f001261bda1b626f5c4790
+ size 1384581
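
This and the remaining binary files are Git LFS pointer stubs, not the blobs themselves: each records the spec version, a SHA-256 object id, and the payload size in bytes (here ~1.4 MB of optimizer state). Fetching the real payloads requires `git lfs pull`; a small sketch of reading the stub format itself, with a hypothetical helper name:

```python
# Parse a Git LFS pointer stub (three "key value" lines).
def parse_lfs_pointer(path):
    with open(path) as f:
        return dict(line.split(" ", 1) for line in f.read().splitlines())

ptr = parse_lfs_pointer("optimizer.pt")
print(ptr["oid"], int(ptr["size"]))  # sha256:8df8... 1384581
```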
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1c6b013f46b045747861457117714384172b61092c169466468f8561fe7778bc
+ size 497807197
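
The ~498 MB payload is consistent with a float32 GPT-2 base model (roughly 124M parameters × 4 bytes). A minimal sketch of inspecting the weights directly, assuming the LFS payload has been pulled; the key name assumes the standard GPT2LMHeadModel state-dict layout:

```python
import torch

# Plain state dict of fine-tuned tensors; map to CPU so no GPU is needed.
state_dict = torch.load("pytorch_model.bin", map_location="cpu")
print(len(state_dict), "tensors")

# Token embedding should be [50257, 768] per config.json above.
print(state_dict["transformer.wte.weight"].shape)
```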
rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7eb0108a1e50c6b0d24ddba383250004950c4cb216d5e80491253222ae96da37
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:227b6fc98e72e825fc7cb2707e3d2e4916c6a423fe8d00e2abd407f878fb8fe6
+ size 627
trainer_state.json ADDED
@@ -0,0 +1,50 @@
+ {
+ "best_metric": 0.8317058086395264,
+ "best_model_checkpoint": "output/gpt2-multiquestions/checkpoint-1636",
+ "epoch": 2.0,
+ "global_step": 1636,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.61,
+ "learning_rate": 8.777506112469438e-05,
+ "loss": 0.4879,
+ "step": 500
+ },
+ {
+ "epoch": 1.0,
+ "eval_loss": 0.8343050479888916,
+ "eval_runtime": 2.343,
+ "eval_samples_per_second": 213.404,
+ "eval_steps_per_second": 26.889,
+ "step": 818
+ },
+ {
+ "epoch": 1.22,
+ "learning_rate": 7.555012224938876e-05,
+ "loss": 0.4298,
+ "step": 1000
+ },
+ {
+ "epoch": 1.83,
+ "learning_rate": 6.332518337408312e-05,
+ "loss": 0.3909,
+ "step": 1500
+ },
+ {
+ "epoch": 2.0,
+ "eval_loss": 0.8317058086395264,
+ "eval_runtime": 2.381,
+ "eval_samples_per_second": 209.994,
+ "eval_steps_per_second": 26.459,
+ "step": 1636
+ }
+ ],
+ "max_steps": 4090,
+ "num_train_epochs": 5,
+ "total_flos": 2562490957824000.0,
+ "trial_name": null,
+ "trial_params": null
+ }
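
This checkpoint is at step 1636 of 4090 (epoch 2 of 5), and the logged learning rates fit a linear decay from an initial 1e-4 with no warmup; the initial rate is inferred from the numbers, not stored in the file. A small sketch that checks this against the log history:

```python
# Inferred schedule: lr(step) = lr0 * (1 - step / max_steps)
lr0, max_steps = 1e-4, 4090  # lr0 is an inference, not in trainer_state.json

for step, logged in [(500, 8.777506112469438e-05),
                     (1000, 7.555012224938876e-05),
                     (1500, 6.332518337408312e-05)]:
    assert abs(lr0 * (1 - step / max_steps) - logged) < 1e-12
```

The optimizer, scheduler, and RNG state files above exist so that `Trainer` can resume this run mid-schedule, e.g. via `trainer.train(resume_from_checkpoint=...)`.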
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:13ea54f10bee7c263a9c6c25683e2e9b93c8db8e8ee08e0adba91e1bafe0b2ab
+ size 3963
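
training_args.bin is a pickled `TrainingArguments` object rather than tensors. A hedged sketch of inspecting it; unpickling imports the class, so a compatible `transformers` (~4.31 here) must be installed:

```python
import torch

# weights_only=False is needed on newer torch, since this is an
# arbitrary pickled object, not a plain tensor state dict.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs)
```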