EC2 Default User committed
Commit: 2ce4c1a
Parent: 85417d7

First model version.

README.md ADDED
@@ -0,0 +1,82 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ datasets:
+ - samsum
+ metrics:
+ - rouge
+ model-index:
+ - name: flan-t5-base-samsum
+   results:
+   - task:
+       name: Sequence-to-sequence Language Modeling
+       type: text2text-generation
+     dataset:
+       name: samsum
+       type: samsum
+       config: samsum
+       split: test
+       args: samsum
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 47.5
+ ---
+ 
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+ 
+ # flan-t5-base-samsum
+ 
+ This model is a fine-tuned version of [google/flan-t5-base](https://huggingface.co/google/flan-t5-base) on the samsum dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 1.3721
+ - Rouge1: 47.5
+ - Rouge2: 23.9237
+ - Rougel: 40.0646
+ - Rougelsum: 43.6387
+ - Gen Len: 17.2405
+ 
+ ## Model description
+ 
+ More information needed
+ 
+ ## Intended uses & limitations
+ 
+ More information needed
+ 
+ ## Training and evaluation data
+ 
+ More information needed
+ 
+ ## Training procedure
+ 
+ ### Training hyperparameters
+ 
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 5
+ 
+ ### Training results
+ 
+ | Training Loss | Epoch | Step | Validation Loss | Rouge1 | Rouge2 | Rougel | Rougelsum | Gen Len |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|:-------:|:-------:|:---------:|:-------:|
+ | 1.4398 | 1.0 | 1842 | 1.3823 | 47.2415 | 23.7419 | 39.5142 | 43.4177 | 17.0354 |
+ | 1.3564 | 2.0 | 3684 | 1.3747 | 46.833 | 23.308 | 39.2838 | 42.9821 | 17.3077 |
+ | 1.2776 | 3.0 | 5526 | 1.3721 | 47.5 | 23.9237 | 40.0646 | 43.6387 | 17.2405 |
+ | 1.2345 | 4.0 | 7368 | 1.3744 | 47.5599 | 23.9714 | 40.06 | 43.8107 | 17.2454 |
+ | 1.194 | 5.0 | 9210 | 1.3760 | 47.7868 | 24.0949 | 40.2021 | 43.789 | 17.2466 |
+ 
+ 
+ ### Framework versions
+ 
+ - Transformers 4.27.4
+ - Pytorch 2.0.0+cu117
+ - Datasets 2.11.0
+ - Tokenizers 0.13.3
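
Since the card's "Intended uses & limitations" section is still empty, here is a minimal, hedged usage sketch for dialogue summarization with `transformers`. The repo id passed to `pipeline` is a placeholder for wherever this model ends up published.

```python
from transformers import pipeline

# Placeholder repo id -- substitute the path this model is published under.
summarizer = pipeline("summarization", model="flan-t5-base-samsum")

dialogue = (
    "Anna: Are we still on for lunch tomorrow?\n"
    "Ben: Yes! 12:30 at the usual place?\n"
    "Anna: Perfect, see you there."
)
print(summarizer(dialogue)[0]["summary_text"])
```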
checkpoint-5526/config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "_name_or_path": "google/flan-t5-base",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.4",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
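
The `task_specific_params.summarization` block above carries the decoding defaults inherited from T5. A minimal sketch of applying them by hand with `generate()`; it assumes a local checkout of this repo, with the tokenizer files at the repo root and the model weights in `checkpoint-5526/`.

```python
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # tokenizer files sit at the repo root
model = AutoModelForSeq2SeqLM.from_pretrained("checkpoint-5526")

# Copy the dict so popping the prompt prefix does not mutate the config.
params = dict(model.config.task_specific_params["summarization"])
prefix = params.pop("prefix")  # "summarize: "

inputs = tokenizer(prefix + "Anna: Lunch tomorrow? Ben: Yes, 12:30.", return_tensors="pt")
ids = model.generate(**inputs, **params)  # num_beams=4, length_penalty=2.0, max_length=200, ...
print(tokenizer.decode(ids[0], skip_special_tokens=True))
```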
checkpoint-5526/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.27.4"
+ }
checkpoint-5526/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1397835223b6207a32dbdc99adfcd45307400821dc1f4b6c0feee00947d8b68b
+ size 1980790149
checkpoint-5526/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e3d687a7e73fddf2af71a4a46ec4964dd6c88632126cf1d9b05155b98d04f419
+ size 990408885
checkpoint-5526/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:177f090776e225c6058120d9a9745d56e588d540b14c97f836a5a195fb3901a4
+ size 14575
checkpoint-5526/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:67e332c14e7fb333fad327e11feed99660d09aaf307f258c695c8dc79bc86585
+ size 627
checkpoint-5526/trainer_state.json ADDED
@@ -0,0 +1,121 @@
+ {
+   "best_metric": 1.3720556497573853,
+   "best_model_checkpoint": "flan-t5-base-samsum/checkpoint-5526",
+   "epoch": 3.0,
+   "global_step": 5526,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.27,
+       "learning_rate": 4.728555917480999e-05,
+       "loss": 1.4803,
+       "step": 500
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 4.457111834961998e-05,
+       "loss": 1.4552,
+       "step": 1000
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 4.185667752442997e-05,
+       "loss": 1.4398,
+       "step": 1500
+     },
+     {
+       "epoch": 1.0,
+       "eval_gen_len": 17.035409035409035,
+       "eval_loss": 1.3823134899139404,
+       "eval_rouge1": 47.2415,
+       "eval_rouge2": 23.7419,
+       "eval_rougeL": 39.5142,
+       "eval_rougeLsum": 43.4177,
+       "eval_runtime": 74.2005,
+       "eval_samples_per_second": 11.038,
+       "eval_steps_per_second": 1.388,
+       "step": 1842
+     },
+     {
+       "epoch": 1.09,
+       "learning_rate": 3.914223669923996e-05,
+       "loss": 1.4078,
+       "step": 2000
+     },
+     {
+       "epoch": 1.36,
+       "learning_rate": 3.642779587404995e-05,
+       "loss": 1.3341,
+       "step": 2500
+     },
+     {
+       "epoch": 1.63,
+       "learning_rate": 3.3713355048859935e-05,
+       "loss": 1.337,
+       "step": 3000
+     },
+     {
+       "epoch": 1.9,
+       "learning_rate": 3.099891422366993e-05,
+       "loss": 1.3564,
+       "step": 3500
+     },
+     {
+       "epoch": 2.0,
+       "eval_gen_len": 17.307692307692307,
+       "eval_loss": 1.3747227191925049,
+       "eval_rouge1": 46.833,
+       "eval_rouge2": 23.308,
+       "eval_rougeL": 39.2838,
+       "eval_rougeLsum": 42.9821,
+       "eval_runtime": 74.9443,
+       "eval_samples_per_second": 10.928,
+       "eval_steps_per_second": 1.374,
+       "step": 3684
+     },
+     {
+       "epoch": 2.17,
+       "learning_rate": 2.8284473398479917e-05,
+       "loss": 1.3162,
+       "step": 4000
+     },
+     {
+       "epoch": 2.44,
+       "learning_rate": 2.5570032573289905e-05,
+       "loss": 1.2739,
+       "step": 4500
+     },
+     {
+       "epoch": 2.71,
+       "learning_rate": 2.2855591748099893e-05,
+       "loss": 1.277,
+       "step": 5000
+     },
+     {
+       "epoch": 2.99,
+       "learning_rate": 2.0141150922909884e-05,
+       "loss": 1.2776,
+       "step": 5500
+     },
+     {
+       "epoch": 3.0,
+       "eval_gen_len": 17.24053724053724,
+       "eval_loss": 1.3720556497573853,
+       "eval_rouge1": 47.5,
+       "eval_rouge2": 23.9237,
+       "eval_rougeL": 40.0646,
+       "eval_rougeLsum": 43.6387,
+       "eval_runtime": 74.4867,
+       "eval_samples_per_second": 10.995,
+       "eval_steps_per_second": 1.383,
+       "step": 5526
+     }
+   ],
+   "max_steps": 9210,
+   "num_train_epochs": 5,
+   "total_flos": 3.026353594879181e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
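
`trainer_state.json` is plain JSON, so the best checkpoint and the per-epoch evaluation curve recorded above can be read back with the standard library; a minimal sketch assuming a local checkout:

```python
import json

with open("checkpoint-5526/trainer_state.json") as f:
    state = json.load(f)

# Best evaluation loss and the checkpoint that produced it.
print(state["best_metric"], state["best_model_checkpoint"])

# Evaluation entries are the log_history items carrying eval_* keys.
for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"epoch {entry['epoch']}: eval_loss={entry['eval_loss']:.4f}, rouge1={entry['eval_rouge1']}")
```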
checkpoint-5526/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9945552c747ee998aaff412e61bf9ed72af622c7f04fa9ae25709456833f3bd8
+ size 3643
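
`training_args.bin` is a pickled `TrainingArguments` object, so it does not render as a diff; the exact object can be restored with `torch.load`. As a hedged reconstruction, the fields pinned down by the README's hyperparameter list would look roughly like this (`output_dir` and the epoch-level eval/save strategies are inferences from the logs, not values read from the binary):

```python
import torch
from transformers import Seq2SeqTrainingArguments

# The exact values can be recovered with:
#   args = torch.load("checkpoint-5526/training_args.bin")

training_args = Seq2SeqTrainingArguments(
    output_dir="flan-t5-base-samsum",   # assumption; matches best_model_checkpoint above
    learning_rate=5e-5,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    num_train_epochs=5,
    lr_scheduler_type="linear",
    evaluation_strategy="epoch",        # inferred from the once-per-epoch eval entries
    save_strategy="epoch",              # inferred from the epoch-aligned checkpoints
    predict_with_generate=True,         # assumption; needed to compute ROUGE during eval
)
```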
checkpoint-9210/config.json ADDED
@@ -0,0 +1,61 @@
+ {
+   "_name_or_path": "google/flan-t5-base",
+   "architectures": [
+     "T5ForConditionalGeneration"
+   ],
+   "d_ff": 2048,
+   "d_kv": 64,
+   "d_model": 768,
+   "decoder_start_token_id": 0,
+   "dense_act_fn": "gelu_new",
+   "dropout_rate": 0.1,
+   "eos_token_id": 1,
+   "feed_forward_proj": "gated-gelu",
+   "initializer_factor": 1.0,
+   "is_encoder_decoder": true,
+   "is_gated_act": true,
+   "layer_norm_epsilon": 1e-06,
+   "model_type": "t5",
+   "n_positions": 512,
+   "num_decoder_layers": 12,
+   "num_heads": 12,
+   "num_layers": 12,
+   "output_past": true,
+   "pad_token_id": 0,
+   "relative_attention_max_distance": 128,
+   "relative_attention_num_buckets": 32,
+   "task_specific_params": {
+     "summarization": {
+       "early_stopping": true,
+       "length_penalty": 2.0,
+       "max_length": 200,
+       "min_length": 30,
+       "no_repeat_ngram_size": 3,
+       "num_beams": 4,
+       "prefix": "summarize: "
+     },
+     "translation_en_to_de": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to German: "
+     },
+     "translation_en_to_fr": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to French: "
+     },
+     "translation_en_to_ro": {
+       "early_stopping": true,
+       "max_length": 300,
+       "num_beams": 4,
+       "prefix": "translate English to Romanian: "
+     }
+   },
+   "tie_word_embeddings": false,
+   "torch_dtype": "float32",
+   "transformers_version": "4.27.4",
+   "use_cache": true,
+   "vocab_size": 32128
+ }
checkpoint-9210/generation_config.json ADDED
@@ -0,0 +1,6 @@
+ {
+   "decoder_start_token_id": 0,
+   "eos_token_id": 1,
+   "pad_token_id": 0,
+   "transformers_version": "4.27.4"
+ }
checkpoint-9210/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7a43ad9e9833b49ec56f72baff113a0d1761b7d63d87f3227e0e8fb4cbbfc125
+ size 1980790149
checkpoint-9210/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e8aa1df3c66854ea36cc36214186c8782f29dccd11dccd45abe2c05a71cf1fbf
+ size 990408885
checkpoint-9210/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:22ff991c0a65e33ec5fe1c574d60db40b3e082781cb2cd3c0d21a9f06d131188
+ size 14575
checkpoint-9210/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:81cdb2c1c20df01faadc95a4b03fdefbf2d02893abb39c4f93c6a86a42372740
+ size 627
checkpoint-9210/trainer_state.json ADDED
@@ -0,0 +1,189 @@
+ {
+   "best_metric": 1.3720556497573853,
+   "best_model_checkpoint": "flan-t5-base-samsum/checkpoint-5526",
+   "epoch": 5.0,
+   "global_step": 9210,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.27,
+       "learning_rate": 4.728555917480999e-05,
+       "loss": 1.4803,
+       "step": 500
+     },
+     {
+       "epoch": 0.54,
+       "learning_rate": 4.457111834961998e-05,
+       "loss": 1.4552,
+       "step": 1000
+     },
+     {
+       "epoch": 0.81,
+       "learning_rate": 4.185667752442997e-05,
+       "loss": 1.4398,
+       "step": 1500
+     },
+     {
+       "epoch": 1.0,
+       "eval_gen_len": 17.035409035409035,
+       "eval_loss": 1.3823134899139404,
+       "eval_rouge1": 47.2415,
+       "eval_rouge2": 23.7419,
+       "eval_rougeL": 39.5142,
+       "eval_rougeLsum": 43.4177,
+       "eval_runtime": 74.2005,
+       "eval_samples_per_second": 11.038,
+       "eval_steps_per_second": 1.388,
+       "step": 1842
+     },
+     {
+       "epoch": 1.09,
+       "learning_rate": 3.914223669923996e-05,
+       "loss": 1.4078,
+       "step": 2000
+     },
+     {
+       "epoch": 1.36,
+       "learning_rate": 3.642779587404995e-05,
+       "loss": 1.3341,
+       "step": 2500
+     },
+     {
+       "epoch": 1.63,
+       "learning_rate": 3.3713355048859935e-05,
+       "loss": 1.337,
+       "step": 3000
+     },
+     {
+       "epoch": 1.9,
+       "learning_rate": 3.099891422366993e-05,
+       "loss": 1.3564,
+       "step": 3500
+     },
+     {
+       "epoch": 2.0,
+       "eval_gen_len": 17.307692307692307,
+       "eval_loss": 1.3747227191925049,
+       "eval_rouge1": 46.833,
+       "eval_rouge2": 23.308,
+       "eval_rougeL": 39.2838,
+       "eval_rougeLsum": 42.9821,
+       "eval_runtime": 74.9443,
+       "eval_samples_per_second": 10.928,
+       "eval_steps_per_second": 1.374,
+       "step": 3684
+     },
+     {
+       "epoch": 2.17,
+       "learning_rate": 2.8284473398479917e-05,
+       "loss": 1.3162,
+       "step": 4000
+     },
+     {
+       "epoch": 2.44,
+       "learning_rate": 2.5570032573289905e-05,
+       "loss": 1.2739,
+       "step": 4500
+     },
+     {
+       "epoch": 2.71,
+       "learning_rate": 2.2855591748099893e-05,
+       "loss": 1.277,
+       "step": 5000
+     },
+     {
+       "epoch": 2.99,
+       "learning_rate": 2.0141150922909884e-05,
+       "loss": 1.2776,
+       "step": 5500
+     },
+     {
+       "epoch": 3.0,
+       "eval_gen_len": 17.24053724053724,
+       "eval_loss": 1.3720556497573853,
+       "eval_rouge1": 47.5,
+       "eval_rouge2": 23.9237,
+       "eval_rougeL": 40.0646,
+       "eval_rougeLsum": 43.6387,
+       "eval_runtime": 74.4867,
+       "eval_samples_per_second": 10.995,
+       "eval_steps_per_second": 1.383,
+       "step": 5526
+     },
+     {
+       "epoch": 3.26,
+       "learning_rate": 1.742671009771987e-05,
+       "loss": 1.2209,
+       "step": 6000
+     },
+     {
+       "epoch": 3.53,
+       "learning_rate": 1.471226927252986e-05,
+       "loss": 1.2427,
+       "step": 6500
+     },
+     {
+       "epoch": 3.8,
+       "learning_rate": 1.1997828447339848e-05,
+       "loss": 1.2345,
+       "step": 7000
+     },
+     {
+       "epoch": 4.0,
+       "eval_gen_len": 17.245421245421245,
+       "eval_loss": 1.3743723630905151,
+       "eval_rouge1": 47.5599,
+       "eval_rouge2": 23.9714,
+       "eval_rougeL": 40.06,
+       "eval_rougeLsum": 43.8107,
+       "eval_runtime": 74.1957,
+       "eval_samples_per_second": 11.038,
+       "eval_steps_per_second": 1.388,
+       "step": 7368
+     },
+     {
+       "epoch": 4.07,
+       "learning_rate": 9.283387622149838e-06,
+       "loss": 1.2388,
+       "step": 7500
+     },
+     {
+       "epoch": 4.34,
+       "learning_rate": 6.568946796959827e-06,
+       "loss": 1.2124,
+       "step": 8000
+     },
+     {
+       "epoch": 4.61,
+       "learning_rate": 3.854505971769816e-06,
+       "loss": 1.196,
+       "step": 8500
+     },
+     {
+       "epoch": 4.89,
+       "learning_rate": 1.1400651465798045e-06,
+       "loss": 1.194,
+       "step": 9000
+     },
+     {
+       "epoch": 5.0,
+       "eval_gen_len": 17.246642246642246,
+       "eval_loss": 1.376030683517456,
+       "eval_rouge1": 47.7868,
+       "eval_rouge2": 24.0949,
+       "eval_rougeL": 40.2021,
+       "eval_rougeLsum": 43.789,
+       "eval_runtime": 74.584,
+       "eval_samples_per_second": 10.981,
+       "eval_steps_per_second": 1.381,
+       "step": 9210
+     }
+   ],
+   "max_steps": 9210,
+   "num_train_epochs": 5,
+   "total_flos": 5.043922658131968e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
checkpoint-9210/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9945552c747ee998aaff412e61bf9ed72af622c7f04fa9ae25709456833f3bd8
+ size 3643
logs/1681296542.5000563/events.out.tfevents.1681296542.ip-172-16-3-175.eu-west-1.compute.internal.477.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:16f357c99f5989baf7d3dea6de3edd8e03d81eeff24bed6a513e7f552a67f83b
+ size 6048
logs/events.out.tfevents.1681296542.ip-172-16-3-175.eu-west-1.compute.internal.477.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8ab4786c44a32e28f774b25ca8c1123df89df5f99f3bd875f82ef068168ce6eb
+ size 10744
logs/events.out.tfevents.1681301141.ip-172-16-3-175.eu-west-1.compute.internal.477.2 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9b0866254a9e7a374d5bcc2838b40291c459777268ef0de08773a670b266b249
+ size 613
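
The three files above are TensorBoard event logs written during training. A minimal reading sketch, assuming `tensorboard` is installed; scalar tag names vary across Trainer versions, so list them before querying:

```python
from tensorboard.backend.event_processing import event_accumulator

ea = event_accumulator.EventAccumulator("logs")
ea.Reload()

tags = ea.Tags()["scalars"]
print(tags)  # e.g. train/loss, eval/loss -- exact names may differ

for event in ea.Scalars(tags[0]):
    print(event.step, event.value)
```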
special_tokens_map.json ADDED
@@ -0,0 +1,107 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "pad_token": "<pad>",
+   "unk_token": "<unk>"
+ }
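
The `<extra_id_*>` entries above are T5's 100 sentinel tokens from span-corruption pretraining; each maps to a single id at the top of the vocabulary. A quick check against the upstream tokenizer this repo inherits:

```python
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("google/flan-t5-base")
# Sentinels are appended in reverse order, so <extra_id_0> gets the highest id.
print(tok.convert_tokens_to_ids(["<extra_id_0>", "<extra_id_99>"]))  # [32099, 32000]
```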
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,112 @@
+ {
+   "additional_special_tokens": [
+     "<extra_id_0>",
+     "<extra_id_1>",
+     "<extra_id_2>",
+     "<extra_id_3>",
+     "<extra_id_4>",
+     "<extra_id_5>",
+     "<extra_id_6>",
+     "<extra_id_7>",
+     "<extra_id_8>",
+     "<extra_id_9>",
+     "<extra_id_10>",
+     "<extra_id_11>",
+     "<extra_id_12>",
+     "<extra_id_13>",
+     "<extra_id_14>",
+     "<extra_id_15>",
+     "<extra_id_16>",
+     "<extra_id_17>",
+     "<extra_id_18>",
+     "<extra_id_19>",
+     "<extra_id_20>",
+     "<extra_id_21>",
+     "<extra_id_22>",
+     "<extra_id_23>",
+     "<extra_id_24>",
+     "<extra_id_25>",
+     "<extra_id_26>",
+     "<extra_id_27>",
+     "<extra_id_28>",
+     "<extra_id_29>",
+     "<extra_id_30>",
+     "<extra_id_31>",
+     "<extra_id_32>",
+     "<extra_id_33>",
+     "<extra_id_34>",
+     "<extra_id_35>",
+     "<extra_id_36>",
+     "<extra_id_37>",
+     "<extra_id_38>",
+     "<extra_id_39>",
+     "<extra_id_40>",
+     "<extra_id_41>",
+     "<extra_id_42>",
+     "<extra_id_43>",
+     "<extra_id_44>",
+     "<extra_id_45>",
+     "<extra_id_46>",
+     "<extra_id_47>",
+     "<extra_id_48>",
+     "<extra_id_49>",
+     "<extra_id_50>",
+     "<extra_id_51>",
+     "<extra_id_52>",
+     "<extra_id_53>",
+     "<extra_id_54>",
+     "<extra_id_55>",
+     "<extra_id_56>",
+     "<extra_id_57>",
+     "<extra_id_58>",
+     "<extra_id_59>",
+     "<extra_id_60>",
+     "<extra_id_61>",
+     "<extra_id_62>",
+     "<extra_id_63>",
+     "<extra_id_64>",
+     "<extra_id_65>",
+     "<extra_id_66>",
+     "<extra_id_67>",
+     "<extra_id_68>",
+     "<extra_id_69>",
+     "<extra_id_70>",
+     "<extra_id_71>",
+     "<extra_id_72>",
+     "<extra_id_73>",
+     "<extra_id_74>",
+     "<extra_id_75>",
+     "<extra_id_76>",
+     "<extra_id_77>",
+     "<extra_id_78>",
+     "<extra_id_79>",
+     "<extra_id_80>",
+     "<extra_id_81>",
+     "<extra_id_82>",
+     "<extra_id_83>",
+     "<extra_id_84>",
+     "<extra_id_85>",
+     "<extra_id_86>",
+     "<extra_id_87>",
+     "<extra_id_88>",
+     "<extra_id_89>",
+     "<extra_id_90>",
+     "<extra_id_91>",
+     "<extra_id_92>",
+     "<extra_id_93>",
+     "<extra_id_94>",
+     "<extra_id_95>",
+     "<extra_id_96>",
+     "<extra_id_97>",
+     "<extra_id_98>",
+     "<extra_id_99>"
+   ],
+   "eos_token": "</s>",
+   "extra_ids": 100,
+   "model_max_length": 512,
+   "pad_token": "<pad>",
+   "sp_model_kwargs": {},
+   "special_tokens_map_file": "/home/younes_huggingface_co/.cache/huggingface/hub/models--google--t5-v1_1-base/snapshots/650d7745bf1e502d6949b22cc19155cd656d3d4e/special_tokens_map.json",
+   "tokenizer_class": "T5Tokenizer",
+   "unk_token": "<unk>"
+ }
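
One practical note on the config above: `model_max_length` is 512, so dialogues longer than that should be truncated at encode time. A minimal sketch, with the path a placeholder for a local checkout of this repo:

```python
from transformers import AutoTokenizer

tokenizer = AutoTokenizer.from_pretrained(".")  # repo root holds tokenizer.json and this config

long_dialogue = "Anna: hello! Ben: hi!\n" * 400  # stand-in for an over-long chat log
enc = tokenizer(
    "summarize: " + long_dialogue,
    max_length=512,
    truncation=True,
    return_tensors="pt",
)
print(enc.input_ids.shape)  # torch.Size([1, 512])
```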