nlparabic committed
Commit 9920a15
1 Parent(s): b816dfd

End of training

README.md CHANGED
@@ -17,11 +17,11 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [aubmindlab/aragpt2-base](https://huggingface.co/aubmindlab/aragpt2-base) on an unknown dataset.
 It achieves the following results on the evaluation set:
- - Loss: 0.0556
- - Bleu: 0.0625
- - Rouge1: 0.3737
- - Rouge2: 0.1489
- - Rougel: 0.3670
+ - Loss: 0.0534
+ - Bleu: 0.0428
+ - Rouge1: 0.3139
+ - Rouge2: 0.1104
+ - Rougel: 0.3097
 
 ## Model description
 
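Since the card above is still the auto-generated stub, a minimal usage sketch may help readers. The Hub repo id below is an assumption inferred from the checkpoint path recorded later in this commit (`res_nw_yem_aragpt2-base` under the `nlparabic` namespace) and should be replaced with the actual id; this is not part of the author's card.

```python
# Hypothetical usage sketch -- the repo id is assumed, not confirmed by this commit.
from transformers import AutoModelForCausalLM, AutoTokenizer

repo_id = "nlparabic/res_nw_yem_aragpt2-base"  # assumed Hub id; adjust as needed

tokenizer = AutoTokenizer.from_pretrained(repo_id)
model = AutoModelForCausalLM.from_pretrained(repo_id)

# Generate a short continuation from an Arabic prompt.
inputs = tokenizer("مرحبا", return_tensors="pt")
outputs = model.generate(**inputs, max_new_tokens=40, do_sample=False)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```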
all_results.json ADDED
@@ -0,0 +1,19 @@
+ {
+ "epoch": 14.0,
+ "eval_bleu": 0.04280893852047394,
+ "eval_loss": 0.05337703973054886,
+ "eval_rouge1": 0.31388693258006284,
+ "eval_rouge2": 0.11036995985896458,
+ "eval_rougeL": 0.3096668301159336,
+ "eval_runtime": 7.0173,
+ "eval_samples": 304,
+ "eval_samples_per_second": 43.322,
+ "eval_steps_per_second": 5.415,
+ "perplexity": 1.0548272819463649,
+ "total_flos": 8918419636224000.0,
+ "train_loss": 0.46412634738241815,
+ "train_runtime": 1276.9352,
+ "train_samples": 1219,
+ "train_samples_per_second": 19.093,
+ "train_steps_per_second": 2.396
+ }
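The reported perplexity is consistent with the usual convention in the Hugging Face language-modeling examples of exponentiating the evaluation loss; a quick check:

```python
import math

eval_loss = 0.05337703973054886
print(math.exp(eval_loss))  # 1.0548272819463649, the "perplexity" value above
```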
egy_training_log.txt CHANGED
@@ -304,3 +304,5 @@ INFO:root:Epoch 13.0: Train Loss = 0.0331, Eval Loss = 0.05399588495492935
 INFO:absl:Using default tokenizer.
 INFO:root:Epoch 14.0: Train Loss = 0.0308, Eval Loss = 0.05515788868069649
 INFO:absl:Using default tokenizer.
+ INFO:__main__:*** Evaluate ***
+ INFO:absl:Using default tokenizer.
eval_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+ "epoch": 14.0,
+ "eval_bleu": 0.04280893852047394,
+ "eval_loss": 0.05337703973054886,
+ "eval_rouge1": 0.31388693258006284,
+ "eval_rouge2": 0.11036995985896458,
+ "eval_rougeL": 0.3096668301159336,
+ "eval_runtime": 7.0173,
+ "eval_samples": 304,
+ "eval_samples_per_second": 43.322,
+ "eval_steps_per_second": 5.415,
+ "perplexity": 1.0548272819463649
+ }
train_results.json ADDED
@@ -0,0 +1,9 @@
+ {
+ "epoch": 14.0,
+ "total_flos": 8918419636224000.0,
+ "train_loss": 0.46412634738241815,
+ "train_runtime": 1276.9352,
+ "train_samples": 1219,
+ "train_samples_per_second": 19.093,
+ "train_steps_per_second": 2.396
+ }
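The throughput figures above appear to be computed against the full scheduled run (20 epochs, 3,060 optimizer steps) rather than the 2,142 steps actually completed before early stopping; the arithmetic below is a sanity check under that assumption:

```python
train_runtime = 1276.9352
print(1219 * 20 / train_runtime)  # ≈ 19.093 (train_samples_per_second)
print(3060 / train_runtime)       # ≈ 2.396  (train_steps_per_second)
```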
train_vs_val_loss.png ADDED
trainer_state.json ADDED
@@ -0,0 +1,317 @@
+ {
+ "best_metric": 0.05337703973054886,
+ "best_model_checkpoint": "/home/iais_marenpielka/Bouthaina/res_nw_yem_aragpt2-base/checkpoint-1377",
+ "epoch": 14.0,
+ "eval_steps": 500,
+ "global_step": 2142,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "grad_norm": 0.3274374306201935,
+ "learning_rate": 1.53e-05,
+ "loss": 5.8224,
+ "step": 153
+ },
+ {
+ "epoch": 1.0,
+ "eval_bleu": 0.00392224521343418,
+ "eval_loss": 0.1128806546330452,
+ "eval_rouge1": 0.07124894337258883,
+ "eval_rouge2": 0.0028587092731829577,
+ "eval_rougeL": 0.06948923152022689,
+ "eval_runtime": 7.1373,
+ "eval_samples_per_second": 42.593,
+ "eval_steps_per_second": 5.324,
+ "step": 153
+ },
+ {
+ "epoch": 2.0,
+ "grad_norm": 0.3334260880947113,
+ "learning_rate": 3.06e-05,
+ "loss": 0.1108,
+ "step": 306
+ },
+ {
+ "epoch": 2.0,
+ "eval_bleu": 0.0,
+ "eval_loss": 0.06912114471197128,
+ "eval_rouge1": 0.09507863230508208,
+ "eval_rouge2": 0.007788772376873063,
+ "eval_rougeL": 0.09317127790126112,
+ "eval_runtime": 6.9813,
+ "eval_samples_per_second": 43.545,
+ "eval_steps_per_second": 5.443,
+ "step": 306
+ },
+ {
+ "epoch": 3.0,
+ "grad_norm": 0.2704195976257324,
+ "learning_rate": 4.5900000000000004e-05,
+ "loss": 0.0775,
+ "step": 459
+ },
+ {
+ "epoch": 3.0,
+ "eval_bleu": 0.006737157818299724,
+ "eval_loss": 0.06276746839284897,
+ "eval_rouge1": 0.12907377035145118,
+ "eval_rouge2": 0.015683124441348124,
+ "eval_rougeL": 0.1286203798342947,
+ "eval_runtime": 6.9851,
+ "eval_samples_per_second": 43.521,
+ "eval_steps_per_second": 5.44,
+ "step": 459
+ },
+ {
+ "epoch": 4.0,
+ "grad_norm": 0.32800641655921936,
+ "learning_rate": 4.7812500000000003e-05,
+ "loss": 0.0678,
+ "step": 612
+ },
+ {
+ "epoch": 4.0,
+ "eval_bleu": 0.008597461521382226,
+ "eval_loss": 0.059174273163080215,
+ "eval_rouge1": 0.1524066357986381,
+ "eval_rouge2": 0.027263211934264564,
+ "eval_rougeL": 0.14917670820737453,
+ "eval_runtime": 6.9134,
+ "eval_samples_per_second": 43.973,
+ "eval_steps_per_second": 5.497,
+ "step": 612
+ },
+ {
+ "epoch": 5.0,
+ "grad_norm": 0.18473878502845764,
+ "learning_rate": 4.482421875e-05,
+ "loss": 0.0603,
+ "step": 765
+ },
+ {
+ "epoch": 5.0,
+ "eval_bleu": 0.016158903735540513,
+ "eval_loss": 0.05656920000910759,
+ "eval_rouge1": 0.19186830417118805,
+ "eval_rouge2": 0.04134465720083692,
+ "eval_rougeL": 0.18829939413002822,
+ "eval_runtime": 6.9842,
+ "eval_samples_per_second": 43.527,
+ "eval_steps_per_second": 5.441,
+ "step": 765
+ },
+ {
+ "epoch": 6.0,
+ "grad_norm": 0.2642894685268402,
+ "learning_rate": 4.18359375e-05,
+ "loss": 0.0547,
+ "step": 918
+ },
+ {
+ "epoch": 6.0,
+ "eval_bleu": 0.018741315914778903,
+ "eval_loss": 0.05464606359601021,
+ "eval_rouge1": 0.2238907288379966,
+ "eval_rouge2": 0.05989126658873175,
+ "eval_rougeL": 0.22184683032173025,
+ "eval_runtime": 7.0034,
+ "eval_samples_per_second": 43.408,
+ "eval_steps_per_second": 5.426,
+ "step": 918
+ },
+ {
+ "epoch": 7.0,
+ "grad_norm": 0.24372832477092743,
+ "learning_rate": 3.884765625e-05,
+ "loss": 0.0498,
+ "step": 1071
+ },
+ {
+ "epoch": 7.0,
+ "eval_bleu": 0.029468363597052377,
+ "eval_loss": 0.054019927978515625,
+ "eval_rouge1": 0.26839447635173574,
+ "eval_rouge2": 0.07325536872796896,
+ "eval_rougeL": 0.2638108570075386,
+ "eval_runtime": 6.9888,
+ "eval_samples_per_second": 43.498,
+ "eval_steps_per_second": 5.437,
+ "step": 1071
+ },
+ {
+ "epoch": 8.0,
+ "grad_norm": 0.2509661316871643,
+ "learning_rate": 3.5859375e-05,
+ "loss": 0.0456,
+ "step": 1224
+ },
+ {
+ "epoch": 8.0,
+ "eval_bleu": 0.029195772701040013,
+ "eval_loss": 0.053611643612384796,
+ "eval_rouge1": 0.2883971192764231,
+ "eval_rouge2": 0.08184845936724905,
+ "eval_rougeL": 0.28406572737215363,
+ "eval_runtime": 6.9871,
+ "eval_samples_per_second": 43.509,
+ "eval_steps_per_second": 5.439,
+ "step": 1224
+ },
+ {
+ "epoch": 9.0,
+ "grad_norm": 0.23149649798870087,
+ "learning_rate": 3.287109375e-05,
+ "loss": 0.0419,
+ "step": 1377
+ },
+ {
+ "epoch": 9.0,
+ "eval_bleu": 0.04280893852047394,
+ "eval_loss": 0.05337703973054886,
+ "eval_rouge1": 0.31388693258006284,
+ "eval_rouge2": 0.11036995985896458,
+ "eval_rougeL": 0.3096668301159336,
+ "eval_runtime": 6.9883,
+ "eval_samples_per_second": 43.501,
+ "eval_steps_per_second": 5.438,
+ "step": 1377
+ },
+ {
+ "epoch": 10.0,
+ "grad_norm": 0.2521679401397705,
+ "learning_rate": 2.9882812500000002e-05,
+ "loss": 0.0385,
+ "step": 1530
+ },
+ {
+ "epoch": 10.0,
+ "eval_bleu": 0.046113511822263795,
+ "eval_loss": 0.05344715714454651,
+ "eval_rouge1": 0.32550355167793193,
+ "eval_rouge2": 0.11175887195986847,
+ "eval_rougeL": 0.3185482802099305,
+ "eval_runtime": 6.9943,
+ "eval_samples_per_second": 43.464,
+ "eval_steps_per_second": 5.433,
+ "step": 1530
+ },
+ {
+ "epoch": 11.0,
+ "grad_norm": 0.23900777101516724,
+ "learning_rate": 2.689453125e-05,
+ "loss": 0.0354,
+ "step": 1683
+ },
+ {
+ "epoch": 11.0,
+ "eval_bleu": 0.047259792068284935,
+ "eval_loss": 0.05403715744614601,
+ "eval_rouge1": 0.3357586244098568,
+ "eval_rouge2": 0.12191279657273882,
+ "eval_rougeL": 0.3287623290777512,
+ "eval_runtime": 7.0002,
+ "eval_samples_per_second": 43.427,
+ "eval_steps_per_second": 5.428,
+ "step": 1683
+ },
+ {
+ "epoch": 12.0,
+ "grad_norm": 0.24781078100204468,
+ "learning_rate": 2.3906250000000002e-05,
+ "loss": 0.0331,
+ "step": 1836
+ },
+ {
+ "epoch": 12.0,
+ "eval_bleu": 0.0475663160301358,
+ "eval_loss": 0.05399588495492935,
+ "eval_rouge1": 0.3483203778520053,
+ "eval_rouge2": 0.13122980561384404,
+ "eval_rougeL": 0.3442445629111672,
+ "eval_runtime": 6.9958,
+ "eval_samples_per_second": 43.455,
+ "eval_steps_per_second": 5.432,
+ "step": 1836
+ },
+ {
+ "epoch": 13.0,
+ "grad_norm": 0.3433144986629486,
+ "learning_rate": 2.091796875e-05,
+ "loss": 0.0308,
+ "step": 1989
+ },
+ {
+ "epoch": 13.0,
+ "eval_bleu": 0.05895730441080768,
+ "eval_loss": 0.05515788868069649,
+ "eval_rouge1": 0.3599156447280303,
+ "eval_rouge2": 0.14385576959590118,
+ "eval_rougeL": 0.3539109033921022,
+ "eval_runtime": 6.9936,
+ "eval_samples_per_second": 43.469,
+ "eval_steps_per_second": 5.434,
+ "step": 1989
+ },
+ {
+ "epoch": 14.0,
+ "grad_norm": 0.3219660520553589,
+ "learning_rate": 1.79296875e-05,
+ "loss": 0.0291,
+ "step": 2142
+ },
+ {
+ "epoch": 14.0,
+ "eval_bleu": 0.06246557033315383,
+ "eval_loss": 0.05557234585285187,
+ "eval_rouge1": 0.37373672906337496,
+ "eval_rouge2": 0.14894175508943397,
+ "eval_rougeL": 0.3669928154707829,
+ "eval_runtime": 6.9886,
+ "eval_samples_per_second": 43.499,
+ "eval_steps_per_second": 5.437,
+ "step": 2142
+ },
+ {
+ "epoch": 14.0,
+ "step": 2142,
+ "total_flos": 8918419636224000.0,
+ "train_loss": 0.46412634738241815,
+ "train_runtime": 1276.9352,
+ "train_samples_per_second": 19.093,
+ "train_steps_per_second": 2.396
+ }
+ ],
+ "logging_steps": 500,
+ "max_steps": 3060,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 20,
+ "save_steps": 500,
+ "stateful_callbacks": {
+ "EarlyStoppingCallback": {
+ "args": {
+ "early_stopping_patience": 5,
+ "early_stopping_threshold": 0.0
+ },
+ "attributes": {
+ "early_stopping_patience_counter": 0
+ }
+ },
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": true
+ },
+ "attributes": {}
+ },
+ "total_flos": 8918419636224000.0,
+ "train_batch_size": 8,
+ "trial_name": null,
+ "trial_params": null
+ }
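The trainer state records an EarlyStoppingCallback with patience 5 and threshold 0.0; eval_loss stopped improving after epoch 9 (checkpoint-1377, the best_model_checkpoint), so training halted at epoch 14 of the scheduled 20. Below is a minimal sketch of how such a setup is typically wired with transformers, not the author's actual training script; the model and dataset variables are placeholders.

```python
# Sketch only: mirrors the early-stopping configuration recorded above,
# assuming a recent transformers release. Model/datasets are placeholders.
from transformers import EarlyStoppingCallback, Trainer, TrainingArguments

args = TrainingArguments(
    output_dir="res_nw_yem_aragpt2-base",
    num_train_epochs=20,                 # num_train_epochs in trainer_state.json
    per_device_train_batch_size=8,       # train_batch_size in trainer_state.json
    eval_strategy="epoch",               # "evaluation_strategy" on older releases
    save_strategy="epoch",
    load_best_model_at_end=True,         # required for EarlyStoppingCallback
    metric_for_best_model="eval_loss",
    greater_is_better=False,
)

early_stopping = EarlyStoppingCallback(
    early_stopping_patience=5,           # matches the recorded callback args
    early_stopping_threshold=0.0,
)

# trainer = Trainer(model=model, args=args, train_dataset=train_ds,
#                   eval_dataset=eval_ds, callbacks=[early_stopping])
# trainer.train()
```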