zaydzuhri commited on
Commit
7acf7d8
1 Parent(s): 5b10c05

Initial training

Browse files
README.md ADDED
@@ -0,0 +1,67 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ license: apache-2.0
3
+ tags:
4
+ - generated_from_trainer
5
+ metrics:
6
+ - rouge
7
+ model-index:
8
+ - name: flan-t5-base-tldr-100k
9
+ results: []
10
+ ---
11
+
12
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
13
+ should probably proofread and complete it, then remove this comment. -->
14
+
15
+ # flan-t5-base-tldr-100k
16
+
17
+ This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on an unspecified dataset.
18
+ It achieves the following results on the evaluation set:
19
+ - Loss: 2.7323
20
+ - Rouge1: 17.0772
21
+ - Rouge2: 4.4204
22
+ - Rougel: 14.549
23
+ - Rougelsum: 15.0148
24
+ - Gen Len: 16.0925
25
+
26
+ ## Model description
27
+
28
+ More information needed
29
+
30
+ ## Intended uses & limitations
31
+
32
+ More information needed
33
+
34
+ ## Training and evaluation data
35
+
36
+ More information needed
37
+
38
+ ## Training procedure
39
+
40
+ ### Training hyperparameters
41
+
42
+ The following hyperparameters were used during training:
43
+ - learning_rate: 5e-05
44
+ - train_batch_size: 8
45
+ - eval_batch_size: 8
46
+ - seed: 42
47
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
48
+ - lr_scheduler_type: linear
49
+ - num_epochs: 5
50
+
51
+ ### Training results
52
+
53
+ | Training Loss | Epoch | Step | Gen Len | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum |
54
+ |:-------------:|:-----:|:-----:|:-------:|:---------------:|:-------:|:------:|:-------:|:---------:|
55
+ | 3.1467 | 1.0 | 11250 | 16.4911 | 2.9531 | 15.909 | 3.8156 | 13.4884 | 13.9568 |
56
+ | 3.0673 | 2.0 | 22500 | 16.4639 | 2.9318 | 16.4972 | 3.9757 | 13.9517 | 14.4336 |
57
+ | 2.9952 | 3.0 | 33750 | 16.2585 | 2.9245 | 16.5997 | 4.1068 | 14.0299 | 14.5147 |
58
+ | 2.9524 | 4.0 | 45000 | 16.0925 | 2.7323 | 17.0772 | 4.4204 | 14.549 | 15.0148 |
59
+ | 2.9223 | 5.0 | 56250 | 16.2163 | 2.7328 | 17.1468 | 4.4384 | 14.5798 | 15.0572 |
60
+
61
+
62
+ ### Framework versions
63
+
64
+ - Transformers 4.27.4
65
+ - Pytorch 2.0.0+cu117
66
+ - Datasets 2.11.0
67
+ - Tokenizers 0.13.2
config.json ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/flan-t5-base",
3
+ "architectures": [
4
+ "T5ForConditionalGeneration"
5
+ ],
6
+ "d_ff": 2048,
7
+ "d_kv": 64,
8
+ "d_model": 768,
9
+ "decoder_start_token_id": 0,
10
+ "dense_act_fn": "gelu_new",
11
+ "dropout_rate": 0.1,
12
+ "eos_token_id": 1,
13
+ "feed_forward_proj": "gated-gelu",
14
+ "initializer_factor": 1.0,
15
+ "is_encoder_decoder": true,
16
+ "is_gated_act": true,
17
+ "layer_norm_epsilon": 1e-06,
18
+ "model_type": "t5",
19
+ "n_positions": 512,
20
+ "num_decoder_layers": 12,
21
+ "num_heads": 12,
22
+ "num_layers": 12,
23
+ "output_past": true,
24
+ "pad_token_id": 0,
25
+ "relative_attention_max_distance": 128,
26
+ "relative_attention_num_buckets": 32,
27
+ "task_specific_params": {
28
+ "summarization": {
29
+ "early_stopping": true,
30
+ "length_penalty": 2.0,
31
+ "max_length": 200,
32
+ "min_length": 30,
33
+ "no_repeat_ngram_size": 3,
34
+ "num_beams": 4,
35
+ "prefix": "summarize: "
36
+ },
37
+ "translation_en_to_de": {
38
+ "early_stopping": true,
39
+ "max_length": 300,
40
+ "num_beams": 4,
41
+ "prefix": "translate English to German: "
42
+ },
43
+ "translation_en_to_fr": {
44
+ "early_stopping": true,
45
+ "max_length": 300,
46
+ "num_beams": 4,
47
+ "prefix": "translate English to French: "
48
+ },
49
+ "translation_en_to_ro": {
50
+ "early_stopping": true,
51
+ "max_length": 300,
52
+ "num_beams": 4,
53
+ "prefix": "translate English to Romanian: "
54
+ }
55
+ },
56
+ "tie_word_embeddings": false,
57
+ "torch_dtype": "float32",
58
+ "transformers_version": "4.27.4",
59
+ "use_cache": true,
60
+ "vocab_size": 32128
61
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "decoder_start_token_id": 0,
3
+ "eos_token_id": 1,
4
+ "pad_token_id": 0,
5
+ "transformers_version": "4.27.4"
6
+ }
optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4fdb2b6814204eba2061434de01be223a89389bc75ce0ea1e319a63fc338114b
3
+ size 1980790149
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6c82aa9b6a927124dccbe7ec64099f793171749f178ac34cb9b59ab65ec439a8
3
+ size 990408885
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:fa3ce086a9504962536d377981c2367867a18f78b8eae2c0916a420701a4208b
3
+ size 14575
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a018b9a26ea0c156eeb9c9950d8d16831fd8327c3fb8add9ccd3661e0ddb1219
3
+ size 627
special_tokens_map.json ADDED
@@ -0,0 +1,107 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<extra_id_0>",
4
+ "<extra_id_1>",
5
+ "<extra_id_2>",
6
+ "<extra_id_3>",
7
+ "<extra_id_4>",
8
+ "<extra_id_5>",
9
+ "<extra_id_6>",
10
+ "<extra_id_7>",
11
+ "<extra_id_8>",
12
+ "<extra_id_9>",
13
+ "<extra_id_10>",
14
+ "<extra_id_11>",
15
+ "<extra_id_12>",
16
+ "<extra_id_13>",
17
+ "<extra_id_14>",
18
+ "<extra_id_15>",
19
+ "<extra_id_16>",
20
+ "<extra_id_17>",
21
+ "<extra_id_18>",
22
+ "<extra_id_19>",
23
+ "<extra_id_20>",
24
+ "<extra_id_21>",
25
+ "<extra_id_22>",
26
+ "<extra_id_23>",
27
+ "<extra_id_24>",
28
+ "<extra_id_25>",
29
+ "<extra_id_26>",
30
+ "<extra_id_27>",
31
+ "<extra_id_28>",
32
+ "<extra_id_29>",
33
+ "<extra_id_30>",
34
+ "<extra_id_31>",
35
+ "<extra_id_32>",
36
+ "<extra_id_33>",
37
+ "<extra_id_34>",
38
+ "<extra_id_35>",
39
+ "<extra_id_36>",
40
+ "<extra_id_37>",
41
+ "<extra_id_38>",
42
+ "<extra_id_39>",
43
+ "<extra_id_40>",
44
+ "<extra_id_41>",
45
+ "<extra_id_42>",
46
+ "<extra_id_43>",
47
+ "<extra_id_44>",
48
+ "<extra_id_45>",
49
+ "<extra_id_46>",
50
+ "<extra_id_47>",
51
+ "<extra_id_48>",
52
+ "<extra_id_49>",
53
+ "<extra_id_50>",
54
+ "<extra_id_51>",
55
+ "<extra_id_52>",
56
+ "<extra_id_53>",
57
+ "<extra_id_54>",
58
+ "<extra_id_55>",
59
+ "<extra_id_56>",
60
+ "<extra_id_57>",
61
+ "<extra_id_58>",
62
+ "<extra_id_59>",
63
+ "<extra_id_60>",
64
+ "<extra_id_61>",
65
+ "<extra_id_62>",
66
+ "<extra_id_63>",
67
+ "<extra_id_64>",
68
+ "<extra_id_65>",
69
+ "<extra_id_66>",
70
+ "<extra_id_67>",
71
+ "<extra_id_68>",
72
+ "<extra_id_69>",
73
+ "<extra_id_70>",
74
+ "<extra_id_71>",
75
+ "<extra_id_72>",
76
+ "<extra_id_73>",
77
+ "<extra_id_74>",
78
+ "<extra_id_75>",
79
+ "<extra_id_76>",
80
+ "<extra_id_77>",
81
+ "<extra_id_78>",
82
+ "<extra_id_79>",
83
+ "<extra_id_80>",
84
+ "<extra_id_81>",
85
+ "<extra_id_82>",
86
+ "<extra_id_83>",
87
+ "<extra_id_84>",
88
+ "<extra_id_85>",
89
+ "<extra_id_86>",
90
+ "<extra_id_87>",
91
+ "<extra_id_88>",
92
+ "<extra_id_89>",
93
+ "<extra_id_90>",
94
+ "<extra_id_91>",
95
+ "<extra_id_92>",
96
+ "<extra_id_93>",
97
+ "<extra_id_94>",
98
+ "<extra_id_95>",
99
+ "<extra_id_96>",
100
+ "<extra_id_97>",
101
+ "<extra_id_98>",
102
+ "<extra_id_99>"
103
+ ],
104
+ "eos_token": "</s>",
105
+ "pad_token": "<pad>",
106
+ "unk_token": "<unk>"
107
+ }
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,112 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<extra_id_0>",
4
+ "<extra_id_1>",
5
+ "<extra_id_2>",
6
+ "<extra_id_3>",
7
+ "<extra_id_4>",
8
+ "<extra_id_5>",
9
+ "<extra_id_6>",
10
+ "<extra_id_7>",
11
+ "<extra_id_8>",
12
+ "<extra_id_9>",
13
+ "<extra_id_10>",
14
+ "<extra_id_11>",
15
+ "<extra_id_12>",
16
+ "<extra_id_13>",
17
+ "<extra_id_14>",
18
+ "<extra_id_15>",
19
+ "<extra_id_16>",
20
+ "<extra_id_17>",
21
+ "<extra_id_18>",
22
+ "<extra_id_19>",
23
+ "<extra_id_20>",
24
+ "<extra_id_21>",
25
+ "<extra_id_22>",
26
+ "<extra_id_23>",
27
+ "<extra_id_24>",
28
+ "<extra_id_25>",
29
+ "<extra_id_26>",
30
+ "<extra_id_27>",
31
+ "<extra_id_28>",
32
+ "<extra_id_29>",
33
+ "<extra_id_30>",
34
+ "<extra_id_31>",
35
+ "<extra_id_32>",
36
+ "<extra_id_33>",
37
+ "<extra_id_34>",
38
+ "<extra_id_35>",
39
+ "<extra_id_36>",
40
+ "<extra_id_37>",
41
+ "<extra_id_38>",
42
+ "<extra_id_39>",
43
+ "<extra_id_40>",
44
+ "<extra_id_41>",
45
+ "<extra_id_42>",
46
+ "<extra_id_43>",
47
+ "<extra_id_44>",
48
+ "<extra_id_45>",
49
+ "<extra_id_46>",
50
+ "<extra_id_47>",
51
+ "<extra_id_48>",
52
+ "<extra_id_49>",
53
+ "<extra_id_50>",
54
+ "<extra_id_51>",
55
+ "<extra_id_52>",
56
+ "<extra_id_53>",
57
+ "<extra_id_54>",
58
+ "<extra_id_55>",
59
+ "<extra_id_56>",
60
+ "<extra_id_57>",
61
+ "<extra_id_58>",
62
+ "<extra_id_59>",
63
+ "<extra_id_60>",
64
+ "<extra_id_61>",
65
+ "<extra_id_62>",
66
+ "<extra_id_63>",
67
+ "<extra_id_64>",
68
+ "<extra_id_65>",
69
+ "<extra_id_66>",
70
+ "<extra_id_67>",
71
+ "<extra_id_68>",
72
+ "<extra_id_69>",
73
+ "<extra_id_70>",
74
+ "<extra_id_71>",
75
+ "<extra_id_72>",
76
+ "<extra_id_73>",
77
+ "<extra_id_74>",
78
+ "<extra_id_75>",
79
+ "<extra_id_76>",
80
+ "<extra_id_77>",
81
+ "<extra_id_78>",
82
+ "<extra_id_79>",
83
+ "<extra_id_80>",
84
+ "<extra_id_81>",
85
+ "<extra_id_82>",
86
+ "<extra_id_83>",
87
+ "<extra_id_84>",
88
+ "<extra_id_85>",
89
+ "<extra_id_86>",
90
+ "<extra_id_87>",
91
+ "<extra_id_88>",
92
+ "<extra_id_89>",
93
+ "<extra_id_90>",
94
+ "<extra_id_91>",
95
+ "<extra_id_92>",
96
+ "<extra_id_93>",
97
+ "<extra_id_94>",
98
+ "<extra_id_95>",
99
+ "<extra_id_96>",
100
+ "<extra_id_97>",
101
+ "<extra_id_98>",
102
+ "<extra_id_99>"
103
+ ],
104
+ "eos_token": "</s>",
105
+ "extra_ids": 100,
106
+ "model_max_length": 512,
107
+ "pad_token": "<pad>",
108
+ "sp_model_kwargs": {},
109
+ "special_tokens_map_file": "/home/younes_huggingface_co/.cache/huggingface/hub/models--google--t5-v1_1-base/snapshots/650d7745bf1e502d6949b22cc19155cd656d3d4e/special_tokens_map.json",
110
+ "tokenizer_class": "T5Tokenizer",
111
+ "unk_token": "<unk>"
112
+ }
trainer_state.json ADDED
@@ -0,0 +1,753 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 2.732292890548706,
3
+ "best_model_checkpoint": "flan-t5-base-tldr-100k/checkpoint-45000",
4
+ "epoch": 5.0,
5
+ "global_step": 56250,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 0.04,
12
+ "learning_rate": 4.955555555555556e-05,
13
+ "loss": 3.2897,
14
+ "step": 500
15
+ },
16
+ {
17
+ "epoch": 0.09,
18
+ "learning_rate": 4.9111111111111114e-05,
19
+ "loss": 3.2632,
20
+ "step": 1000
21
+ },
22
+ {
23
+ "epoch": 0.13,
24
+ "learning_rate": 4.866666666666667e-05,
25
+ "loss": 3.2601,
26
+ "step": 1500
27
+ },
28
+ {
29
+ "epoch": 0.18,
30
+ "learning_rate": 4.8222222222222225e-05,
31
+ "loss": 3.2293,
32
+ "step": 2000
33
+ },
34
+ {
35
+ "epoch": 0.22,
36
+ "learning_rate": 4.7777777777777784e-05,
37
+ "loss": 3.2164,
38
+ "step": 2500
39
+ },
40
+ {
41
+ "epoch": 0.27,
42
+ "learning_rate": 4.7333333333333336e-05,
43
+ "loss": 3.1959,
44
+ "step": 3000
45
+ },
46
+ {
47
+ "epoch": 0.31,
48
+ "learning_rate": 4.6888888888888895e-05,
49
+ "loss": 3.1905,
50
+ "step": 3500
51
+ },
52
+ {
53
+ "epoch": 0.36,
54
+ "learning_rate": 4.644444444444445e-05,
55
+ "loss": 3.233,
56
+ "step": 4000
57
+ },
58
+ {
59
+ "epoch": 0.4,
60
+ "learning_rate": 4.600000000000001e-05,
61
+ "loss": 3.1792,
62
+ "step": 4500
63
+ },
64
+ {
65
+ "epoch": 0.44,
66
+ "learning_rate": 4.555555555555556e-05,
67
+ "loss": 3.2026,
68
+ "step": 5000
69
+ },
70
+ {
71
+ "epoch": 0.49,
72
+ "learning_rate": 4.511111111111112e-05,
73
+ "loss": 3.1937,
74
+ "step": 5500
75
+ },
76
+ {
77
+ "epoch": 0.53,
78
+ "learning_rate": 4.466666666666667e-05,
79
+ "loss": 3.1751,
80
+ "step": 6000
81
+ },
82
+ {
83
+ "epoch": 0.58,
84
+ "learning_rate": 4.422222222222222e-05,
85
+ "loss": 3.173,
86
+ "step": 6500
87
+ },
88
+ {
89
+ "epoch": 0.62,
90
+ "learning_rate": 4.377777777777778e-05,
91
+ "loss": 3.1968,
92
+ "step": 7000
93
+ },
94
+ {
95
+ "epoch": 0.67,
96
+ "learning_rate": 4.3333333333333334e-05,
97
+ "loss": 3.1666,
98
+ "step": 7500
99
+ },
100
+ {
101
+ "epoch": 0.71,
102
+ "learning_rate": 4.2888888888888886e-05,
103
+ "loss": 3.1855,
104
+ "step": 8000
105
+ },
106
+ {
107
+ "epoch": 0.76,
108
+ "learning_rate": 4.2444444444444445e-05,
109
+ "loss": 3.1626,
110
+ "step": 8500
111
+ },
112
+ {
113
+ "epoch": 0.8,
114
+ "learning_rate": 4.2e-05,
115
+ "loss": 3.1699,
116
+ "step": 9000
117
+ },
118
+ {
119
+ "epoch": 0.84,
120
+ "learning_rate": 4.155555555555556e-05,
121
+ "loss": 3.1657,
122
+ "step": 9500
123
+ },
124
+ {
125
+ "epoch": 0.89,
126
+ "learning_rate": 4.111111111111111e-05,
127
+ "loss": 3.1547,
128
+ "step": 10000
129
+ },
130
+ {
131
+ "epoch": 0.93,
132
+ "learning_rate": 4.066666666666667e-05,
133
+ "loss": 3.1496,
134
+ "step": 10500
135
+ },
136
+ {
137
+ "epoch": 0.98,
138
+ "learning_rate": 4.022222222222222e-05,
139
+ "loss": 3.1467,
140
+ "step": 11000
141
+ },
142
+ {
143
+ "epoch": 1.0,
144
+ "eval_gen_len": 16.4911,
145
+ "eval_loss": 2.953094482421875,
146
+ "eval_rouge1": 15.909,
147
+ "eval_rouge2": 3.8156,
148
+ "eval_rougeL": 13.4884,
149
+ "eval_rougeLsum": 13.9568,
150
+ "eval_runtime": 1704.954,
151
+ "eval_samples_per_second": 5.865,
152
+ "eval_steps_per_second": 0.733,
153
+ "step": 11250
154
+ },
155
+ {
156
+ "epoch": 1.02,
157
+ "learning_rate": 3.977777777777778e-05,
158
+ "loss": 3.0983,
159
+ "step": 11500
160
+ },
161
+ {
162
+ "epoch": 1.07,
163
+ "learning_rate": 3.933333333333333e-05,
164
+ "loss": 3.0641,
165
+ "step": 12000
166
+ },
167
+ {
168
+ "epoch": 1.11,
169
+ "learning_rate": 3.888888888888889e-05,
170
+ "loss": 3.0471,
171
+ "step": 12500
172
+ },
173
+ {
174
+ "epoch": 1.16,
175
+ "learning_rate": 3.844444444444444e-05,
176
+ "loss": 3.0583,
177
+ "step": 13000
178
+ },
179
+ {
180
+ "epoch": 1.2,
181
+ "learning_rate": 3.8e-05,
182
+ "loss": 3.069,
183
+ "step": 13500
184
+ },
185
+ {
186
+ "epoch": 1.24,
187
+ "learning_rate": 3.7555555555555554e-05,
188
+ "loss": 3.0642,
189
+ "step": 14000
190
+ },
191
+ {
192
+ "epoch": 1.29,
193
+ "learning_rate": 3.7111111111111113e-05,
194
+ "loss": 3.0729,
195
+ "step": 14500
196
+ },
197
+ {
198
+ "epoch": 1.33,
199
+ "learning_rate": 3.6666666666666666e-05,
200
+ "loss": 3.0753,
201
+ "step": 15000
202
+ },
203
+ {
204
+ "epoch": 1.38,
205
+ "learning_rate": 3.6222222222222225e-05,
206
+ "loss": 3.0415,
207
+ "step": 15500
208
+ },
209
+ {
210
+ "epoch": 1.42,
211
+ "learning_rate": 3.577777777777778e-05,
212
+ "loss": 3.0632,
213
+ "step": 16000
214
+ },
215
+ {
216
+ "epoch": 1.47,
217
+ "learning_rate": 3.5333333333333336e-05,
218
+ "loss": 3.0757,
219
+ "step": 16500
220
+ },
221
+ {
222
+ "epoch": 1.51,
223
+ "learning_rate": 3.4888888888888895e-05,
224
+ "loss": 3.0589,
225
+ "step": 17000
226
+ },
227
+ {
228
+ "epoch": 1.56,
229
+ "learning_rate": 3.444444444444445e-05,
230
+ "loss": 3.0586,
231
+ "step": 17500
232
+ },
233
+ {
234
+ "epoch": 1.6,
235
+ "learning_rate": 3.4000000000000007e-05,
236
+ "loss": 3.0582,
237
+ "step": 18000
238
+ },
239
+ {
240
+ "epoch": 1.64,
241
+ "learning_rate": 3.355555555555556e-05,
242
+ "loss": 3.0657,
243
+ "step": 18500
244
+ },
245
+ {
246
+ "epoch": 1.69,
247
+ "learning_rate": 3.311111111111112e-05,
248
+ "loss": 3.0653,
249
+ "step": 19000
250
+ },
251
+ {
252
+ "epoch": 1.73,
253
+ "learning_rate": 3.266666666666667e-05,
254
+ "loss": 3.0513,
255
+ "step": 19500
256
+ },
257
+ {
258
+ "epoch": 1.78,
259
+ "learning_rate": 3.222222222222223e-05,
260
+ "loss": 3.082,
261
+ "step": 20000
262
+ },
263
+ {
264
+ "epoch": 1.82,
265
+ "learning_rate": 3.177777777777778e-05,
266
+ "loss": 3.0785,
267
+ "step": 20500
268
+ },
269
+ {
270
+ "epoch": 1.87,
271
+ "learning_rate": 3.1333333333333334e-05,
272
+ "loss": 3.0654,
273
+ "step": 21000
274
+ },
275
+ {
276
+ "epoch": 1.91,
277
+ "learning_rate": 3.088888888888889e-05,
278
+ "loss": 3.0531,
279
+ "step": 21500
280
+ },
281
+ {
282
+ "epoch": 1.96,
283
+ "learning_rate": 3.044444444444445e-05,
284
+ "loss": 3.0518,
285
+ "step": 22000
286
+ },
287
+ {
288
+ "epoch": 2.0,
289
+ "learning_rate": 3e-05,
290
+ "loss": 3.0673,
291
+ "step": 22500
292
+ },
293
+ {
294
+ "epoch": 2.0,
295
+ "eval_gen_len": 16.4639,
296
+ "eval_loss": 2.93178129196167,
297
+ "eval_rouge1": 16.4972,
298
+ "eval_rouge2": 3.9757,
299
+ "eval_rougeL": 13.9517,
300
+ "eval_rougeLsum": 14.4336,
301
+ "eval_runtime": 1398.1721,
302
+ "eval_samples_per_second": 7.152,
303
+ "eval_steps_per_second": 0.894,
304
+ "step": 22500
305
+ },
306
+ {
307
+ "epoch": 2.04,
308
+ "learning_rate": 2.955555555555556e-05,
309
+ "loss": 2.9751,
310
+ "step": 23000
311
+ },
312
+ {
313
+ "epoch": 2.09,
314
+ "learning_rate": 2.9111111111111112e-05,
315
+ "loss": 2.9883,
316
+ "step": 23500
317
+ },
318
+ {
319
+ "epoch": 2.13,
320
+ "learning_rate": 2.8666666666666668e-05,
321
+ "loss": 2.9911,
322
+ "step": 24000
323
+ },
324
+ {
325
+ "epoch": 2.18,
326
+ "learning_rate": 2.8222222222222223e-05,
327
+ "loss": 2.9796,
328
+ "step": 24500
329
+ },
330
+ {
331
+ "epoch": 2.22,
332
+ "learning_rate": 2.777777777777778e-05,
333
+ "loss": 2.9767,
334
+ "step": 25000
335
+ },
336
+ {
337
+ "epoch": 2.27,
338
+ "learning_rate": 2.733333333333333e-05,
339
+ "loss": 2.9795,
340
+ "step": 25500
341
+ },
342
+ {
343
+ "epoch": 2.31,
344
+ "learning_rate": 2.688888888888889e-05,
345
+ "loss": 2.9654,
346
+ "step": 26000
347
+ },
348
+ {
349
+ "epoch": 2.36,
350
+ "learning_rate": 2.6444444444444443e-05,
351
+ "loss": 2.9904,
352
+ "step": 26500
353
+ },
354
+ {
355
+ "epoch": 2.4,
356
+ "learning_rate": 2.6000000000000002e-05,
357
+ "loss": 2.9785,
358
+ "step": 27000
359
+ },
360
+ {
361
+ "epoch": 2.44,
362
+ "learning_rate": 2.5555555555555554e-05,
363
+ "loss": 2.9856,
364
+ "step": 27500
365
+ },
366
+ {
367
+ "epoch": 2.49,
368
+ "learning_rate": 2.5111111111111113e-05,
369
+ "loss": 3.007,
370
+ "step": 28000
371
+ },
372
+ {
373
+ "epoch": 2.53,
374
+ "learning_rate": 2.466666666666667e-05,
375
+ "loss": 2.9819,
376
+ "step": 28500
377
+ },
378
+ {
379
+ "epoch": 2.58,
380
+ "learning_rate": 2.4222222222222224e-05,
381
+ "loss": 3.0003,
382
+ "step": 29000
383
+ },
384
+ {
385
+ "epoch": 2.62,
386
+ "learning_rate": 2.377777777777778e-05,
387
+ "loss": 2.981,
388
+ "step": 29500
389
+ },
390
+ {
391
+ "epoch": 2.67,
392
+ "learning_rate": 2.3333333333333336e-05,
393
+ "loss": 2.9926,
394
+ "step": 30000
395
+ },
396
+ {
397
+ "epoch": 2.71,
398
+ "learning_rate": 2.288888888888889e-05,
399
+ "loss": 2.9901,
400
+ "step": 30500
401
+ },
402
+ {
403
+ "epoch": 2.76,
404
+ "learning_rate": 2.2444444444444447e-05,
405
+ "loss": 2.9893,
406
+ "step": 31000
407
+ },
408
+ {
409
+ "epoch": 2.8,
410
+ "learning_rate": 2.2000000000000003e-05,
411
+ "loss": 2.9668,
412
+ "step": 31500
413
+ },
414
+ {
415
+ "epoch": 2.84,
416
+ "learning_rate": 2.1555555555555555e-05,
417
+ "loss": 2.9903,
418
+ "step": 32000
419
+ },
420
+ {
421
+ "epoch": 2.89,
422
+ "learning_rate": 2.111111111111111e-05,
423
+ "loss": 2.9687,
424
+ "step": 32500
425
+ },
426
+ {
427
+ "epoch": 2.93,
428
+ "learning_rate": 2.0666666666666666e-05,
429
+ "loss": 2.9839,
430
+ "step": 33000
431
+ },
432
+ {
433
+ "epoch": 2.98,
434
+ "learning_rate": 2.0222222222222222e-05,
435
+ "loss": 2.9952,
436
+ "step": 33500
437
+ },
438
+ {
439
+ "epoch": 3.0,
440
+ "eval_gen_len": 16.2585,
441
+ "eval_loss": 2.924546003341675,
442
+ "eval_rouge1": 16.5997,
443
+ "eval_rouge2": 4.1068,
444
+ "eval_rougeL": 14.0299,
445
+ "eval_rougeLsum": 14.5147,
446
+ "eval_runtime": 945.9838,
447
+ "eval_samples_per_second": 10.571,
448
+ "eval_steps_per_second": 1.321,
449
+ "step": 33750
450
+ },
451
+ {
452
+ "epoch": 3.02,
453
+ "learning_rate": 1.9777777777777778e-05,
454
+ "loss": 2.9548,
455
+ "step": 34000
456
+ },
457
+ {
458
+ "epoch": 3.07,
459
+ "learning_rate": 1.9333333333333333e-05,
460
+ "loss": 2.9566,
461
+ "step": 34500
462
+ },
463
+ {
464
+ "epoch": 3.11,
465
+ "learning_rate": 1.888888888888889e-05,
466
+ "loss": 2.953,
467
+ "step": 35000
468
+ },
469
+ {
470
+ "epoch": 3.16,
471
+ "learning_rate": 1.8444444444444445e-05,
472
+ "loss": 2.9514,
473
+ "step": 35500
474
+ },
475
+ {
476
+ "epoch": 3.2,
477
+ "learning_rate": 1.8e-05,
478
+ "loss": 2.9362,
479
+ "step": 36000
480
+ },
481
+ {
482
+ "epoch": 3.24,
483
+ "learning_rate": 1.7555555555555556e-05,
484
+ "loss": 2.9482,
485
+ "step": 36500
486
+ },
487
+ {
488
+ "epoch": 3.29,
489
+ "learning_rate": 1.7111111111111112e-05,
490
+ "loss": 2.9441,
491
+ "step": 37000
492
+ },
493
+ {
494
+ "epoch": 3.33,
495
+ "learning_rate": 1.6666666666666667e-05,
496
+ "loss": 2.9366,
497
+ "step": 37500
498
+ },
499
+ {
500
+ "epoch": 3.38,
501
+ "learning_rate": 1.6222222222222223e-05,
502
+ "loss": 2.9558,
503
+ "step": 38000
504
+ },
505
+ {
506
+ "epoch": 3.42,
507
+ "learning_rate": 1.577777777777778e-05,
508
+ "loss": 2.959,
509
+ "step": 38500
510
+ },
511
+ {
512
+ "epoch": 3.47,
513
+ "learning_rate": 1.5333333333333334e-05,
514
+ "loss": 2.9634,
515
+ "step": 39000
516
+ },
517
+ {
518
+ "epoch": 3.51,
519
+ "learning_rate": 1.4888888888888888e-05,
520
+ "loss": 2.9464,
521
+ "step": 39500
522
+ },
523
+ {
524
+ "epoch": 3.56,
525
+ "learning_rate": 1.4444444444444444e-05,
526
+ "loss": 2.9669,
527
+ "step": 40000
528
+ },
529
+ {
530
+ "epoch": 3.6,
531
+ "learning_rate": 1.4000000000000001e-05,
532
+ "loss": 2.9404,
533
+ "step": 40500
534
+ },
535
+ {
536
+ "epoch": 3.64,
537
+ "learning_rate": 1.3555555555555557e-05,
538
+ "loss": 2.9686,
539
+ "step": 41000
540
+ },
541
+ {
542
+ "epoch": 3.69,
543
+ "learning_rate": 1.3111111111111113e-05,
544
+ "loss": 2.9384,
545
+ "step": 41500
546
+ },
547
+ {
548
+ "epoch": 3.73,
549
+ "learning_rate": 1.2666666666666668e-05,
550
+ "loss": 2.9304,
551
+ "step": 42000
552
+ },
553
+ {
554
+ "epoch": 3.78,
555
+ "learning_rate": 1.2222222222222222e-05,
556
+ "loss": 2.9474,
557
+ "step": 42500
558
+ },
559
+ {
560
+ "epoch": 3.82,
561
+ "learning_rate": 1.1777777777777778e-05,
562
+ "loss": 2.9455,
563
+ "step": 43000
564
+ },
565
+ {
566
+ "epoch": 3.87,
567
+ "learning_rate": 1.1333333333333334e-05,
568
+ "loss": 2.9456,
569
+ "step": 43500
570
+ },
571
+ {
572
+ "epoch": 3.91,
573
+ "learning_rate": 1.088888888888889e-05,
574
+ "loss": 2.9359,
575
+ "step": 44000
576
+ },
577
+ {
578
+ "epoch": 3.96,
579
+ "learning_rate": 1.0444444444444445e-05,
580
+ "loss": 2.9445,
581
+ "step": 44500
582
+ },
583
+ {
584
+ "epoch": 4.0,
585
+ "learning_rate": 1e-05,
586
+ "loss": 2.9524,
587
+ "step": 45000
588
+ },
589
+ {
590
+ "epoch": 4.0,
591
+ "eval_gen_len": 16.0925,
592
+ "eval_loss": 2.732292890548706,
593
+ "eval_rouge1": 17.0772,
594
+ "eval_rouge2": 4.4204,
595
+ "eval_rougeL": 14.549,
596
+ "eval_rougeLsum": 15.0148,
597
+ "eval_runtime": 957.2679,
598
+ "eval_samples_per_second": 10.446,
599
+ "eval_steps_per_second": 1.306,
600
+ "step": 45000
601
+ },
602
+ {
603
+ "epoch": 4.04,
604
+ "learning_rate": 9.555555555555556e-06,
605
+ "loss": 2.8978,
606
+ "step": 45500
607
+ },
608
+ {
609
+ "epoch": 4.09,
610
+ "learning_rate": 9.111111111111112e-06,
611
+ "loss": 2.8935,
612
+ "step": 46000
613
+ },
614
+ {
615
+ "epoch": 4.13,
616
+ "learning_rate": 8.666666666666668e-06,
617
+ "loss": 2.9088,
618
+ "step": 46500
619
+ },
620
+ {
621
+ "epoch": 4.18,
622
+ "learning_rate": 8.222222222222223e-06,
623
+ "loss": 2.9148,
624
+ "step": 47000
625
+ },
626
+ {
627
+ "epoch": 4.22,
628
+ "learning_rate": 7.777777777777777e-06,
629
+ "loss": 2.9336,
630
+ "step": 47500
631
+ },
632
+ {
633
+ "epoch": 4.27,
634
+ "learning_rate": 7.333333333333334e-06,
635
+ "loss": 2.9111,
636
+ "step": 48000
637
+ },
638
+ {
639
+ "epoch": 4.31,
640
+ "learning_rate": 6.888888888888889e-06,
641
+ "loss": 2.934,
642
+ "step": 48500
643
+ },
644
+ {
645
+ "epoch": 4.36,
646
+ "learning_rate": 6.4444444444444445e-06,
647
+ "loss": 2.9219,
648
+ "step": 49000
649
+ },
650
+ {
651
+ "epoch": 4.4,
652
+ "learning_rate": 6e-06,
653
+ "loss": 2.914,
654
+ "step": 49500
655
+ },
656
+ {
657
+ "epoch": 4.44,
658
+ "learning_rate": 5.555555555555556e-06,
659
+ "loss": 2.9088,
660
+ "step": 50000
661
+ },
662
+ {
663
+ "epoch": 4.49,
664
+ "learning_rate": 5.1111111111111115e-06,
665
+ "loss": 2.93,
666
+ "step": 50500
667
+ },
668
+ {
669
+ "epoch": 4.53,
670
+ "learning_rate": 4.666666666666667e-06,
671
+ "loss": 2.9101,
672
+ "step": 51000
673
+ },
674
+ {
675
+ "epoch": 4.58,
676
+ "learning_rate": 4.222222222222223e-06,
677
+ "loss": 2.9189,
678
+ "step": 51500
679
+ },
680
+ {
681
+ "epoch": 4.62,
682
+ "learning_rate": 3.777777777777778e-06,
683
+ "loss": 2.9197,
684
+ "step": 52000
685
+ },
686
+ {
687
+ "epoch": 4.67,
688
+ "learning_rate": 3.3333333333333333e-06,
689
+ "loss": 2.9263,
690
+ "step": 52500
691
+ },
692
+ {
693
+ "epoch": 4.71,
694
+ "learning_rate": 2.888888888888889e-06,
695
+ "loss": 2.925,
696
+ "step": 53000
697
+ },
698
+ {
699
+ "epoch": 4.76,
700
+ "learning_rate": 2.4444444444444447e-06,
701
+ "loss": 2.9124,
702
+ "step": 53500
703
+ },
704
+ {
705
+ "epoch": 4.8,
706
+ "learning_rate": 2.0000000000000003e-06,
707
+ "loss": 2.8981,
708
+ "step": 54000
709
+ },
710
+ {
711
+ "epoch": 4.84,
712
+ "learning_rate": 1.5555555555555556e-06,
713
+ "loss": 2.9145,
714
+ "step": 54500
715
+ },
716
+ {
717
+ "epoch": 4.89,
718
+ "learning_rate": 1.1111111111111112e-06,
719
+ "loss": 2.9135,
720
+ "step": 55000
721
+ },
722
+ {
723
+ "epoch": 4.93,
724
+ "learning_rate": 6.666666666666667e-07,
725
+ "loss": 2.9066,
726
+ "step": 55500
727
+ },
728
+ {
729
+ "epoch": 4.98,
730
+ "learning_rate": 2.2222222222222224e-07,
731
+ "loss": 2.9223,
732
+ "step": 56000
733
+ },
734
+ {
735
+ "epoch": 5.0,
736
+ "eval_gen_len": 16.2163,
737
+ "eval_loss": 2.732771158218384,
738
+ "eval_rouge1": 17.1468,
739
+ "eval_rouge2": 4.4384,
740
+ "eval_rougeL": 14.5798,
741
+ "eval_rougeLsum": 15.0572,
742
+ "eval_runtime": 1011.7448,
743
+ "eval_samples_per_second": 9.884,
744
+ "eval_steps_per_second": 1.235,
745
+ "step": 56250
746
+ }
747
+ ],
748
+ "max_steps": 56250,
749
+ "num_train_epochs": 5,
750
+ "total_flos": 3.081408086016e+17,
751
+ "trial_name": null,
752
+ "trial_params": null
753
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a35832c4a9ab3abfb6dc0e4a1b83adfb1fc2be17a65a663ad2fd8b9f10ba17ea
3
+ size 3707