José Ángel González committed on
Commit 841bbde
1 Parent(s): 85c8f2c
.gitignore ADDED
@@ -0,0 +1 @@
+ checkpoint-*/
README.md ADDED
@@ -0,0 +1,81 @@
+ ---
+ license: apache-2.0
+ tags:
+ - generated_from_trainer
+ metrics:
+ - rouge
+ model-index:
+ - name: POCTS
+   results:
+   - task:
+       name: Summarization
+       type: summarization
+     metrics:
+     - name: Rouge1
+       type: rouge
+       value: 23.9947
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # POCTS
+
+ This model is a fine-tuned version of [facebook/bart-large](https://huggingface.co/facebook/bart-large) on an unknown dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 4.6366
+ - Rouge1: 23.9947
+ - Rouge2: 5.3034
+ - RougeL: 16.3635
+ - RougeLsum: 19.7575
+ - Gen Len: 55.25
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 5e-05
+ - train_batch_size: 8
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - lr_scheduler_warmup_ratio: 0.15
+ - num_epochs: 10.0
+ - mixed_precision_training: Native AMP
+
+ ### Training results
+
+ | Training Loss | Epoch | Step | Validation Loss | Rouge1  | Rouge2 | RougeL  | RougeLsum | Gen Len |
+ |:-------------:|:-----:|:----:|:---------------:|:-------:|:------:|:-------:|:---------:|:-------:|
+ | 3.6608        | 1.0   | 125  | 3.3274          | 15.2816 | 4.0723 | 12.0873 | 13.6532   | 19.23   |
+ | 3.3135        | 2.0   | 250  | 3.3133          | 16.5105 | 4.4915 | 13.8639 | 14.8435   | 19.31   |
+ | 2.7732        | 3.0   | 375  | 3.3856          | 16.8687 | 4.904  | 13.4615 | 14.7432   | 19.93   |
+ | 2.2759        | 4.0   | 500  | 3.5508          | 15.4023 | 4.1761 | 12.5635 | 13.7679   | 19.27   |
+ | 1.8199        | 5.0   | 625  | 3.7591          | 17.7686 | 4.532  | 13.9086 | 15.3305   | 19.93   |
+ | 1.4575        | 6.0   | 750  | 3.9726          | 16.4133 | 4.359  | 13.4621 | 14.5896   | 19.92   |
+ | 1.126         | 7.0   | 875  | 4.2964          | 17.3934 | 3.6935 | 13.7934 | 14.9719   | 19.43   |
+ | 0.9073        | 8.0   | 1000 | 4.4205          | 17.4328 | 3.8734 | 13.4282 | 14.7105   | 19.83   |
+ | 0.7925        | 9.0   | 1125 | 4.5501          | 17.3798 | 3.9775 | 13.3141 | 14.7692   | 19.87   |
+ | 0.6844        | 10.0  | 1250 | 4.6366          | 17.3867 | 4.2671 | 13.7924 | 15.1543   | 19.81   |
+
+
+ ### Framework versions
+
+ - Transformers 4.10.2
+ - Pytorch 1.7.1+cu110
+ - Datasets 1.11.0
+ - Tokenizers 0.10.3
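Once pushed, the checkpoint above loads like any other BART summarization model. A minimal usage sketch, assuming the files in this commit live in a Hub repository; the `<user>/POCTS` path is a placeholder, not the actual repo id:

```python
# Minimal sketch: load the fine-tuned POCTS checkpoint and summarize one text.
# "<user>/POCTS" is a placeholder repo id; replace it with the real Hub path
# (or a local directory containing the files from this commit).
from transformers import BartForConditionalGeneration, BartTokenizer

model_id = "<user>/POCTS"
tokenizer = BartTokenizer.from_pretrained(model_id)
model = BartForConditionalGeneration.from_pretrained(model_id)

text = "Article text to summarize ..."
inputs = tokenizer(text, max_length=1024, truncation=True, return_tensors="pt")

# config.json (later in this commit) ships num_beams=4 and
# no_repeat_ngram_size=3 as generation defaults, so generate() picks them up
# without extra arguments.
summary_ids = model.generate(inputs["input_ids"], max_length=128, min_length=12)
print(tokenizer.decode(summary_ids[0], skip_special_tokens=True))
```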
all_results.json ADDED
@@ -0,0 +1,18 @@
+ {
+     "epoch": 10.0,
+     "eval_gen_len": 55.25,
+     "eval_loss": 4.636623382568359,
+     "eval_rouge1": 23.9947,
+     "eval_rouge2": 5.3034,
+     "eval_rougeL": 16.3635,
+     "eval_rougeLsum": 19.7575,
+     "eval_runtime": 30.2029,
+     "eval_samples": 100,
+     "eval_samples_per_second": 3.311,
+     "eval_steps_per_second": 0.43,
+     "train_loss": 1.877090069580078,
+     "train_runtime": 1007.5303,
+     "train_samples": 995,
+     "train_samples_per_second": 9.876,
+     "train_steps_per_second": 1.241
+ }
config.json ADDED
@@ -0,0 +1,73 @@
+ {
+   "_name_or_path": "facebook/bart-large",
+   "activation_dropout": 0.1,
+   "activation_function": "gelu",
+   "add_bias_logits": false,
+   "add_final_layer_norm": false,
+   "architectures": [
+     "BartForConditionalGeneration"
+   ],
+   "attention_dropout": 0.1,
+   "bos_token_id": 0,
+   "classif_dropout": 0.1,
+   "classifier_dropout": 0.0,
+   "d_model": 1024,
+   "decoder_attention_heads": 16,
+   "decoder_ffn_dim": 4096,
+   "decoder_layerdrop": 0.0,
+   "decoder_layers": 12,
+   "decoder_start_token_id": 2,
+   "dropout": 0.1,
+   "early_stopping": true,
+   "encoder_attention_heads": 16,
+   "encoder_ffn_dim": 4096,
+   "encoder_layerdrop": 0.0,
+   "encoder_layers": 12,
+   "eos_token_id": 2,
+   "forced_eos_token_id": 2,
+   "gradient_checkpointing": false,
+   "id2label": {
+     "0": "LABEL_0",
+     "1": "LABEL_1",
+     "2": "LABEL_2"
+   },
+   "init_std": 0.02,
+   "is_encoder_decoder": true,
+   "label2id": {
+     "LABEL_0": 0,
+     "LABEL_1": 1,
+     "LABEL_2": 2
+   },
+   "max_position_embeddings": 1024,
+   "model_type": "bart",
+   "no_repeat_ngram_size": 3,
+   "normalize_before": false,
+   "num_beams": 4,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "scale_embedding": false,
+   "task_specific_params": {
+     "summarization": {
+       "length_penalty": 1.0,
+       "max_length": 128,
+       "min_length": 12,
+       "num_beams": 4
+     },
+     "summarization_cnn": {
+       "length_penalty": 2.0,
+       "max_length": 142,
+       "min_length": 56,
+       "num_beams": 4
+     },
+     "summarization_xsum": {
+       "length_penalty": 1.0,
+       "max_length": 62,
+       "min_length": 11,
+       "num_beams": 6
+     }
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.10.2",
+   "use_cache": true,
+   "vocab_size": 50265
+ }
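The `task_specific_params` block is how BART checkpoints inherited from facebook/bart-large ship per-task decoding defaults: when the summarization pipeline loads the model, it copies the matching sub-dict into the live config before generating. A short sketch of that mechanism, shown against the upstream facebook/bart-large config, which has the same structure as this file:

```python
# Sketch: apply the "summarization" task defaults from task_specific_params,
# mirroring what transformers' summarization pipeline does internally.
from transformers import AutoConfig

config = AutoConfig.from_pretrained("facebook/bart-large")
config.update(config.task_specific_params["summarization"])
print(config.max_length, config.min_length, config.num_beams)  # -> 128 12 4
```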
eval_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 10.0,
+     "eval_gen_len": 55.25,
+     "eval_loss": 4.636623382568359,
+     "eval_rouge1": 23.9947,
+     "eval_rouge2": 5.3034,
+     "eval_rougeL": 16.3635,
+     "eval_rougeLsum": 19.7575,
+     "eval_runtime": 30.2029,
+     "eval_samples": 100,
+     "eval_samples_per_second": 3.311,
+     "eval_steps_per_second": 0.43
+ }
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:90eee62817bf936a7493476fc3439c5f8844d7965c7e38dcfe0ac1244ea91248
+ size 1625569391
special_tokens_map.json ADDED
@@ -0,0 +1 @@
+ {"bos_token": "<s>", "eos_token": "</s>", "unk_token": "<unk>", "sep_token": "</s>", "pad_token": "<pad>", "cls_token": "<s>", "mask_token": {"content": "<mask>", "single_word": false, "lstrip": true, "rstrip": false, "normalized": false}}
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
+ {"unk_token": "<unk>", "bos_token": "<s>", "eos_token": "</s>", "add_prefix_space": false, "errors": "replace", "sep_token": "</s>", "cls_token": "<s>", "pad_token": "<pad>", "mask_token": "<mask>", "model_max_length": 1024, "special_tokens_map_file": null, "name_or_path": "facebook/bart-large", "tokenizer_class": "BartTokenizer"}
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 10.0,
+     "train_loss": 1.877090069580078,
+     "train_runtime": 1007.5303,
+     "train_samples": 995,
+     "train_samples_per_second": 9.876,
+     "train_steps_per_second": 1.241
+ }
trainer_state.json ADDED
@@ -0,0 +1,305 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 10.0,
+   "global_step": 1250,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.4,
+       "learning_rate": 1.223404255319149e-05,
+       "loss": 3.8907,
+       "step": 50
+     },
+     {
+       "epoch": 0.8,
+       "learning_rate": 2.5265957446808515e-05,
+       "loss": 3.6608,
+       "step": 100
+     },
+     {
+       "epoch": 1.0,
+       "eval_gen_len": 19.23,
+       "eval_loss": 3.32743763923645,
+       "eval_rouge1": 15.2816,
+       "eval_rouge2": 4.0723,
+       "eval_rougeL": 12.0873,
+       "eval_rougeLsum": 13.6532,
+       "eval_runtime": 11.1064,
+       "eval_samples_per_second": 9.004,
+       "eval_steps_per_second": 1.17,
+       "step": 125
+     },
+     {
+       "epoch": 1.2,
+       "learning_rate": 3.8563829787234045e-05,
+       "loss": 3.4017,
+       "step": 150
+     },
+     {
+       "epoch": 1.6,
+       "learning_rate": 4.967043314500942e-05,
+       "loss": 3.3923,
+       "step": 200
+     },
+     {
+       "epoch": 2.0,
+       "learning_rate": 4.7316384180790966e-05,
+       "loss": 3.3135,
+       "step": 250
+     },
+     {
+       "epoch": 2.0,
+       "eval_gen_len": 19.31,
+       "eval_loss": 3.313326358795166,
+       "eval_rouge1": 16.5105,
+       "eval_rouge2": 4.4915,
+       "eval_rougeL": 13.8639,
+       "eval_rougeLsum": 14.8435,
+       "eval_runtime": 10.7765,
+       "eval_samples_per_second": 9.279,
+       "eval_steps_per_second": 1.206,
+       "step": 250
+     },
+     {
+       "epoch": 2.4,
+       "learning_rate": 4.4962335216572505e-05,
+       "loss": 2.7445,
+       "step": 300
+     },
+     {
+       "epoch": 2.8,
+       "learning_rate": 4.260828625235405e-05,
+       "loss": 2.7732,
+       "step": 350
+     },
+     {
+       "epoch": 3.0,
+       "eval_gen_len": 19.93,
+       "eval_loss": 3.3855550289154053,
+       "eval_rouge1": 16.8687,
+       "eval_rouge2": 4.904,
+       "eval_rougeL": 13.4615,
+       "eval_rougeLsum": 14.7432,
+       "eval_runtime": 10.8555,
+       "eval_samples_per_second": 9.212,
+       "eval_steps_per_second": 1.198,
+       "step": 375
+     },
+     {
+       "epoch": 3.2,
+       "learning_rate": 4.025423728813559e-05,
+       "loss": 2.5055,
+       "step": 400
+     },
+     {
+       "epoch": 3.6,
+       "learning_rate": 3.790018832391714e-05,
+       "loss": 2.1993,
+       "step": 450
+     },
+     {
+       "epoch": 4.0,
+       "learning_rate": 3.554613935969868e-05,
+       "loss": 2.2759,
+       "step": 500
+     },
+     {
+       "epoch": 4.0,
+       "eval_gen_len": 19.27,
+       "eval_loss": 3.5507638454437256,
+       "eval_rouge1": 15.4023,
+       "eval_rouge2": 4.1761,
+       "eval_rougeL": 12.5635,
+       "eval_rougeLsum": 13.7679,
+       "eval_runtime": 10.8402,
+       "eval_samples_per_second": 9.225,
+       "eval_steps_per_second": 1.199,
+       "step": 500
+     },
+     {
+       "epoch": 4.4,
+       "learning_rate": 3.319209039548023e-05,
+       "loss": 1.7219,
+       "step": 550
+     },
+     {
+       "epoch": 4.8,
+       "learning_rate": 3.0838041431261774e-05,
+       "loss": 1.8199,
+       "step": 600
+     },
+     {
+       "epoch": 5.0,
+       "eval_gen_len": 19.93,
+       "eval_loss": 3.7590889930725098,
+       "eval_rouge1": 17.7686,
+       "eval_rouge2": 4.532,
+       "eval_rougeL": 13.9086,
+       "eval_rougeLsum": 15.3305,
+       "eval_runtime": 10.8632,
+       "eval_samples_per_second": 9.205,
+       "eval_steps_per_second": 1.197,
+       "step": 625
+     },
+     {
+       "epoch": 5.2,
+       "learning_rate": 2.8483992467043313e-05,
+       "loss": 1.5748,
+       "step": 650
+     },
+     {
+       "epoch": 5.6,
+       "learning_rate": 2.612994350282486e-05,
+       "loss": 1.3965,
+       "step": 700
+     },
+     {
+       "epoch": 6.0,
+       "learning_rate": 2.3775894538606405e-05,
+       "loss": 1.4575,
+       "step": 750
+     },
+     {
+       "epoch": 6.0,
+       "eval_gen_len": 19.92,
+       "eval_loss": 3.9725918769836426,
+       "eval_rouge1": 16.4133,
+       "eval_rouge2": 4.359,
+       "eval_rougeL": 13.4621,
+       "eval_rougeLsum": 14.5896,
+       "eval_runtime": 10.8642,
+       "eval_samples_per_second": 9.205,
+       "eval_steps_per_second": 1.197,
+       "step": 750
+     },
+     {
+       "epoch": 6.4,
+       "learning_rate": 2.1421845574387948e-05,
+       "loss": 1.1037,
+       "step": 800
+     },
+     {
+       "epoch": 6.8,
+       "learning_rate": 1.906779661016949e-05,
+       "loss": 1.126,
+       "step": 850
+     },
+     {
+       "epoch": 7.0,
+       "eval_gen_len": 19.43,
+       "eval_loss": 4.29640007019043,
+       "eval_rouge1": 17.3934,
+       "eval_rouge2": 3.6935,
+       "eval_rougeL": 13.7934,
+       "eval_rougeLsum": 14.9719,
+       "eval_runtime": 10.8699,
+       "eval_samples_per_second": 9.2,
+       "eval_steps_per_second": 1.196,
+       "step": 875
+     },
+     {
+       "epoch": 7.2,
+       "learning_rate": 1.6713747645951036e-05,
+       "loss": 1.0553,
+       "step": 900
+     },
+     {
+       "epoch": 7.6,
+       "learning_rate": 1.435969868173258e-05,
+       "loss": 0.9473,
+       "step": 950
+     },
+     {
+       "epoch": 8.0,
+       "learning_rate": 1.2005649717514125e-05,
+       "loss": 0.9073,
+       "step": 1000
+     },
+     {
+       "epoch": 8.0,
+       "eval_gen_len": 19.83,
+       "eval_loss": 4.420531749725342,
+       "eval_rouge1": 17.4328,
+       "eval_rouge2": 3.8734,
+       "eval_rougeL": 13.4282,
+       "eval_rougeLsum": 14.7105,
+       "eval_runtime": 10.9454,
+       "eval_samples_per_second": 9.136,
+       "eval_steps_per_second": 1.188,
+       "step": 1000
+     },
+     {
+       "epoch": 8.4,
+       "learning_rate": 9.65160075329567e-06,
+       "loss": 0.7636,
+       "step": 1050
+     },
+     {
+       "epoch": 8.8,
+       "learning_rate": 7.297551789077213e-06,
+       "loss": 0.7925,
+       "step": 1100
+     },
+     {
+       "epoch": 9.0,
+       "eval_gen_len": 19.87,
+       "eval_loss": 4.550107955932617,
+       "eval_rouge1": 17.3798,
+       "eval_rouge2": 3.9775,
+       "eval_rougeL": 13.3141,
+       "eval_rougeLsum": 14.7692,
+       "eval_runtime": 10.8417,
+       "eval_samples_per_second": 9.224,
+       "eval_steps_per_second": 1.199,
+       "step": 1125
+     },
+     {
+       "epoch": 9.2,
+       "learning_rate": 4.990583804143127e-06,
+       "loss": 0.7396,
+       "step": 1150
+     },
+     {
+       "epoch": 9.6,
+       "learning_rate": 2.6365348399246707e-06,
+       "loss": 0.6792,
+       "step": 1200
+     },
+     {
+       "epoch": 10.0,
+       "learning_rate": 2.8248587570621473e-07,
+       "loss": 0.6844,
+       "step": 1250
+     },
+     {
+       "epoch": 10.0,
+       "eval_gen_len": 19.81,
+       "eval_loss": 4.636623382568359,
+       "eval_rouge1": 17.3867,
+       "eval_rouge2": 4.2671,
+       "eval_rougeL": 13.7924,
+       "eval_rougeLsum": 15.1543,
+       "eval_runtime": 10.8893,
+       "eval_samples_per_second": 9.183,
+       "eval_steps_per_second": 1.194,
+       "step": 1250
+     },
+     {
+       "epoch": 10.0,
+       "step": 1250,
+       "total_flos": 2.278181410848768e+16,
+       "train_loss": 1.877090069580078,
+       "train_runtime": 1007.5303,
+       "train_samples_per_second": 9.876,
+       "train_steps_per_second": 1.241
+     }
+   ],
+   "max_steps": 1250,
+   "num_train_epochs": 10,
+   "total_flos": 2.278181410848768e+16,
+   "trial_name": null,
+   "trial_params": null
+ }
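The `log_history` list interleaves training entries (keyed by `loss` and `learning_rate`) with evaluation entries (keyed by `eval_*`). A small sketch for pulling out the evaluation curve, which makes the overfitting in this run easy to see: training loss falls from 3.89 to 0.68 while validation loss climbs from 3.31 to 4.64.

```python
# Sketch: extract the per-epoch evaluation curve from trainer_state.json.
import json

with open("trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:  # evaluation records only; train records carry "loss"
        print(f'epoch {entry["epoch"]:4.1f}  '
              f'eval_loss={entry["eval_loss"]:.4f}  '
              f'rouge1={entry["eval_rouge1"]}')
```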
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:70f5cbd5c3fdf1dff1e73912d6b5f5e3bf6950bf344cc4feff7de3eec81290ac
+ size 2799
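`training_args.bin` is the pickled `Seq2SeqTrainingArguments` object the run was launched with. It is opaque in the diff, but the README's hyperparameter list maps onto it roughly as below; a hedged reconstruction, not a dump of this exact file, and `output_dir` and `predict_with_generate` are assumptions the diff does not record.

```python
# Sketch: the README hyperparameters expressed as Seq2SeqTrainingArguments
# (transformers 4.10.x). Adam betas=(0.9, 0.999) and epsilon=1e-08 are the
# library defaults, so they need no explicit arguments here.
from transformers import Seq2SeqTrainingArguments

args = Seq2SeqTrainingArguments(
    output_dir="POCTS",             # guess: the real output dir is not in the diff
    learning_rate=5e-05,
    per_device_train_batch_size=8,
    per_device_eval_batch_size=8,
    seed=42,
    lr_scheduler_type="linear",
    warmup_ratio=0.15,
    num_train_epochs=10.0,
    fp16=True,                      # "Native AMP" in the README
    predict_with_generate=True,     # assumption: needed for ROUGE/gen_len eval
)
```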
vocab.json ADDED
The diff for this file is too large to render. See raw diff