nomsgadded committed
Commit b40530e · 1 Parent(s): 80cb67e

End of training

Files changed (5)
  1. README.md +5 -5
  2. all_results.json +13 -13
  3. eval_results.json +8 -8
  4. train_results.json +6 -6
  5. trainer_state.json +288 -42
README.md CHANGED
@@ -14,7 +14,7 @@ model-index:
       name: Masked Language Modeling
       type: fill-mask
     dataset:
-      name: wikitext
+      name: wikitext wikitext-2-raw-v1
       type: wikitext
       config: wikitext-2-raw-v1
       split: validation
@@ -22,7 +22,7 @@ model-index:
     metrics:
     - name: Accuracy
      type: accuracy
-      value: 0.7278010101558682
+      value: 0.7302927161334241
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -30,10 +30,10 @@ should probably proofread and complete it, then remove this comment. -->
 
 # mlm
 
-This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the wikitext dataset.
+This model is a fine-tuned version of [roberta-base](https://huggingface.co/roberta-base) on the wikitext wikitext-2-raw-v1 dataset.
 It achieves the following results on the evaluation set:
-- Loss: 1.2607
-- Accuracy: 0.7278
+- Loss: 1.2468
+- Accuracy: 0.7303
 
 ## Model description
 
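The updated card still targets the fill-mask task, so a minimal usage sketch may help. The repo id below is an assumption inferred from the commit author and model name (it is not stated in the diff); substitute the real Hub id or a local checkpoint path.

```python
# Minimal sketch: load the fine-tuned checkpoint with the fill-mask pipeline.
# "nomsgadded/mlm" is an assumed repo id; replace it with the actual one or a local path.
from transformers import pipeline

fill_mask = pipeline("fill-mask", model="nomsgadded/mlm")

# RoBERTa-based models use "<mask>" as the mask token.
for prediction in fill_mask("The capital of France is <mask>."):
    print(f"{prediction['token_str']!r}: {prediction['score']:.3f}")
```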
all_results.json CHANGED
@@ -1,15 +1,15 @@
 {
-    "epoch": 0.99,
-    "eval_accuracy": 0.37187601824698596,
-    "eval_loss": 3.4801909923553467,
-    "eval_runtime": 104.7921,
-    "eval_samples": 240,
-    "eval_samples_per_second": 2.29,
-    "eval_steps_per_second": 0.286,
-    "perplexity": 32.46592222670883,
-    "train_loss": 3.7061044375101724,
-    "train_runtime": 2296.6495,
-    "train_samples": 2318,
-    "train_samples_per_second": 1.009,
-    "train_steps_per_second": 0.031
+    "epoch": 3.0,
+    "eval_accuracy": 0.7302927161334241,
+    "eval_loss": 1.2467830181121826,
+    "eval_runtime": 14.0489,
+    "eval_samples": 496,
+    "eval_samples_per_second": 35.305,
+    "eval_steps_per_second": 4.413,
+    "perplexity": 3.479132628765549,
+    "train_loss": 1.4182749218410915,
+    "train_runtime": 1171.1026,
+    "train_samples": 4798,
+    "train_samples_per_second": 12.291,
+    "train_steps_per_second": 0.384
 }
eval_results.json CHANGED
@@ -1,10 +1,10 @@
 {
-    "epoch": 0.99,
-    "eval_accuracy": 0.37187601824698596,
-    "eval_loss": 3.4801909923553467,
-    "eval_runtime": 104.7921,
-    "eval_samples": 240,
-    "eval_samples_per_second": 2.29,
-    "eval_steps_per_second": 0.286,
-    "perplexity": 32.46592222670883
+    "epoch": 3.0,
+    "eval_accuracy": 0.7302927161334241,
+    "eval_loss": 1.2467830181121826,
+    "eval_runtime": 14.0489,
+    "eval_samples": 496,
+    "eval_samples_per_second": 35.305,
+    "eval_steps_per_second": 4.413,
+    "perplexity": 3.479132628765549
 }
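The perplexity field tracks the eval loss directly: assuming the usual convention of the MLM example scripts, perplexity = exp(eval_loss), which reproduces both the old and new values.

```python
# Sanity check: perplexity is exp(eval_loss) under the usual MLM-script convention.
import math

print(math.exp(3.4801909923553467))  # ~32.466, the previous perplexity
print(math.exp(1.2467830181121826))  # ~3.479, the new perplexity
```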
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-    "epoch": 0.99,
-    "train_loss": 3.7061044375101724,
-    "train_runtime": 2296.6495,
-    "train_samples": 2318,
-    "train_samples_per_second": 1.009,
-    "train_steps_per_second": 0.031
+    "epoch": 3.0,
+    "train_loss": 1.4182749218410915,
+    "train_runtime": 1171.1026,
+    "train_samples": 4798,
+    "train_samples_per_second": 12.291,
+    "train_steps_per_second": 0.384
 }
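The new throughput figures are self-consistent: the Trainer counts samples across all epochs, so with 4798 samples, 3 epochs, 450 optimizer steps (see trainer_state.json below), and a ~1171 s runtime, the reported rates follow directly.

```python
# Consistency check on the reported training throughput (values from this commit).
train_samples, num_epochs, total_steps, runtime_s = 4798, 3, 450, 1171.1026

print(train_samples * num_epochs / runtime_s)  # ~12.29 samples/s
print(total_steps / runtime_s)                 # ~0.38 steps/s
```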
trainer_state.json CHANGED
@@ -1,79 +1,325 @@
 {
-    "best_metric": 0.37187601824698596,
-    "best_model_checkpoint": ".\\output\\checkpoint-72",
-    "epoch": 0.993103448275862,
+    "best_metric": 0.7278010101558682,
+    "best_model_checkpoint": ".\\output\\checkpoint-450",
+    "epoch": 3.0,
     "eval_steps": 500,
-    "global_step": 72,
+    "global_step": 450,
     "is_hyper_param_search": false,
     "is_local_process_zero": true,
     "is_world_process_zero": true,
     "log_history": [
         {
-            "epoch": 0.14,
-            "learning_rate": 2.90625e-05,
-            "loss": 3.9372,
+            "epoch": 0.07,
+            "learning_rate": 6e-06,
+            "loss": 1.8646,
             "step": 10
         },
         {
-            "epoch": 0.28,
-            "learning_rate": 2.4375e-05,
-            "loss": 3.76,
+            "epoch": 0.13,
+            "learning_rate": 1.2666666666666667e-05,
+            "loss": 1.7999,
             "step": 20
         },
         {
-            "epoch": 0.41,
-            "learning_rate": 1.96875e-05,
-            "loss": 3.6869,
+            "epoch": 0.2,
+            "learning_rate": 1.9333333333333333e-05,
+            "loss": 1.6122,
             "step": 30
         },
         {
-            "epoch": 0.55,
-            "learning_rate": 1.5e-05,
-            "loss": 3.6689,
+            "epoch": 0.27,
+            "learning_rate": 2.6000000000000002e-05,
+            "loss": 1.5558,
             "step": 40
         },
         {
-            "epoch": 0.69,
-            "learning_rate": 1.03125e-05,
-            "loss": 3.6483,
+            "epoch": 0.33,
+            "learning_rate": 2.9703703703703707e-05,
+            "loss": 1.4972,
             "step": 50
         },
         {
-            "epoch": 0.83,
-            "learning_rate": 5.625e-06,
-            "loss": 3.6523,
+            "epoch": 0.4,
+            "learning_rate": 2.8962962962962965e-05,
+            "loss": 1.482,
             "step": 60
         },
         {
-            "epoch": 0.97,
-            "learning_rate": 9.375e-07,
-            "loss": 3.6158,
+            "epoch": 0.47,
+            "learning_rate": 2.8222222222222223e-05,
+            "loss": 1.4642,
             "step": 70
         },
         {
-            "epoch": 0.99,
-            "eval_accuracy": 0.37187601824698596,
-            "eval_loss": 3.4801909923553467,
-            "eval_runtime": 101.4158,
-            "eval_samples_per_second": 2.366,
-            "eval_steps_per_second": 0.296,
-            "step": 72
+            "epoch": 0.53,
+            "learning_rate": 2.7481481481481482e-05,
+            "loss": 1.4455,
+            "step": 80
         },
         {
-            "epoch": 0.99,
-            "step": 72,
-            "total_flos": 602027713363968.0,
-            "train_loss": 3.7061044375101724,
-            "train_runtime": 2296.6495,
-            "train_samples_per_second": 1.009,
-            "train_steps_per_second": 0.031
+            "epoch": 0.6,
+            "learning_rate": 2.6740740740740743e-05,
+            "loss": 1.4228,
+            "step": 90
+        },
+        {
+            "epoch": 0.67,
+            "learning_rate": 2.6000000000000002e-05,
+            "loss": 1.4093,
+            "step": 100
+        },
+        {
+            "epoch": 0.73,
+            "learning_rate": 2.525925925925926e-05,
+            "loss": 1.3687,
+            "step": 110
+        },
+        {
+            "epoch": 0.8,
+            "learning_rate": 2.451851851851852e-05,
+            "loss": 1.4491,
+            "step": 120
+        },
+        {
+            "epoch": 0.87,
+            "learning_rate": 2.377777777777778e-05,
+            "loss": 1.4152,
+            "step": 130
+        },
+        {
+            "epoch": 0.93,
+            "learning_rate": 2.303703703703704e-05,
+            "loss": 1.4672,
+            "step": 140
+        },
+        {
+            "epoch": 1.0,
+            "learning_rate": 2.2296296296296297e-05,
+            "loss": 1.3758,
+            "step": 150
+        },
+        {
+            "epoch": 1.0,
+            "eval_accuracy": 0.7276595744680852,
+            "eval_loss": 1.2825729846954346,
+            "eval_runtime": 14.8174,
+            "eval_samples_per_second": 33.474,
+            "eval_steps_per_second": 4.184,
+            "step": 150
+        },
+        {
+            "epoch": 1.07,
+            "learning_rate": 2.155555555555556e-05,
+            "loss": 1.372,
+            "step": 160
+        },
+        {
+            "epoch": 1.13,
+            "learning_rate": 2.0814814814814817e-05,
+            "loss": 1.4247,
+            "step": 170
+        },
+        {
+            "epoch": 1.2,
+            "learning_rate": 2.0074074074074075e-05,
+            "loss": 1.3985,
+            "step": 180
+        },
+        {
+            "epoch": 1.27,
+            "learning_rate": 1.9333333333333333e-05,
+            "loss": 1.3752,
+            "step": 190
+        },
+        {
+            "epoch": 1.33,
+            "learning_rate": 1.8592592592592595e-05,
+            "loss": 1.4129,
+            "step": 200
+        },
+        {
+            "epoch": 1.4,
+            "learning_rate": 1.7851851851851853e-05,
+            "loss": 1.3847,
+            "step": 210
+        },
+        {
+            "epoch": 1.47,
+            "learning_rate": 1.7111111111111112e-05,
+            "loss": 1.3855,
+            "step": 220
+        },
+        {
+            "epoch": 1.53,
+            "learning_rate": 1.6370370370370374e-05,
+            "loss": 1.3916,
+            "step": 230
+        },
+        {
+            "epoch": 1.6,
+            "learning_rate": 1.5629629629629632e-05,
+            "loss": 1.4184,
+            "step": 240
+        },
+        {
+            "epoch": 1.67,
+            "learning_rate": 1.4888888888888888e-05,
+            "loss": 1.3827,
+            "step": 250
+        },
+        {
+            "epoch": 1.73,
+            "learning_rate": 1.4148148148148148e-05,
+            "loss": 1.3742,
+            "step": 260
+        },
+        {
+            "epoch": 1.8,
+            "learning_rate": 1.3407407407407408e-05,
+            "loss": 1.4016,
+            "step": 270
+        },
+        {
+            "epoch": 1.87,
+            "learning_rate": 1.2666666666666667e-05,
+            "loss": 1.3888,
+            "step": 280
+        },
+        {
+            "epoch": 1.93,
+            "learning_rate": 1.1925925925925927e-05,
+            "loss": 1.3748,
+            "step": 290
+        },
+        {
+            "epoch": 2.0,
+            "learning_rate": 1.1185185185185185e-05,
+            "loss": 1.3763,
+            "step": 300
+        },
+        {
+            "epoch": 2.0,
+            "eval_accuracy": 0.727228590694538,
+            "eval_loss": 1.2746729850769043,
+            "eval_runtime": 14.4425,
+            "eval_samples_per_second": 34.343,
+            "eval_steps_per_second": 4.293,
+            "step": 300
+        },
+        {
+            "epoch": 2.07,
+            "learning_rate": 1.0444444444444445e-05,
+            "loss": 1.3781,
+            "step": 310
+        },
+        {
+            "epoch": 2.13,
+            "learning_rate": 9.703703703703703e-06,
+            "loss": 1.4019,
+            "step": 320
+        },
+        {
+            "epoch": 2.2,
+            "learning_rate": 8.962962962962963e-06,
+            "loss": 1.3862,
+            "step": 330
+        },
+        {
+            "epoch": 2.27,
+            "learning_rate": 8.222222222222222e-06,
+            "loss": 1.3784,
+            "step": 340
+        },
+        {
+            "epoch": 2.33,
+            "learning_rate": 7.481481481481482e-06,
+            "loss": 1.3407,
+            "step": 350
+        },
+        {
+            "epoch": 2.4,
+            "learning_rate": 6.740740740740741e-06,
+            "loss": 1.3496,
+            "step": 360
+        },
+        {
+            "epoch": 2.47,
+            "learning_rate": 6e-06,
+            "loss": 1.3681,
+            "step": 370
+        },
+        {
+            "epoch": 2.53,
+            "learning_rate": 5.25925925925926e-06,
+            "loss": 1.3453,
+            "step": 380
+        },
+        {
+            "epoch": 2.6,
+            "learning_rate": 4.518518518518519e-06,
+            "loss": 1.3294,
+            "step": 390
+        },
+        {
+            "epoch": 2.67,
+            "learning_rate": 3.7777777777777777e-06,
+            "loss": 1.3361,
+            "step": 400
+        },
+        {
+            "epoch": 2.73,
+            "learning_rate": 3.0370370370370372e-06,
+            "loss": 1.3615,
+            "step": 410
+        },
+        {
+            "epoch": 2.8,
+            "learning_rate": 2.2962962962962964e-06,
+            "loss": 1.3104,
+            "step": 420
+        },
+        {
+            "epoch": 2.87,
+            "learning_rate": 1.5555555555555556e-06,
+            "loss": 1.3395,
+            "step": 430
+        },
+        {
+            "epoch": 2.93,
+            "learning_rate": 8.148148148148149e-07,
+            "loss": 1.3499,
+            "step": 440
+        },
+        {
+            "epoch": 3.0,
+            "learning_rate": 7.407407407407407e-08,
+            "loss": 1.3558,
+            "step": 450
+        },
+        {
+            "epoch": 3.0,
+            "eval_accuracy": 0.7278010101558682,
+            "eval_loss": 1.260701298713684,
+            "eval_runtime": 14.198,
+            "eval_samples_per_second": 34.935,
+            "eval_steps_per_second": 4.367,
+            "step": 450
+        },
+        {
+            "epoch": 3.0,
+            "step": 450,
+            "total_flos": 3789443078682624.0,
+            "train_loss": 1.4182749218410915,
+            "train_runtime": 1171.1026,
+            "train_samples_per_second": 12.291,
+            "train_steps_per_second": 0.384
         }
     ],
     "logging_steps": 10,
-    "max_steps": 72,
-    "num_train_epochs": 1,
+    "max_steps": 450,
+    "num_train_epochs": 3,
     "save_steps": 100.0,
-    "total_flos": 602027713363968.0,
+    "total_flos": 3789443078682624.0,
     "trial_name": null,
     "trial_params": null
 }
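For orientation, the logged schedule (linear warmup to a peak of roughly 3e-05 over about the first 45 of 450 steps, then linear decay to zero over 3 epochs, with logging every 10 steps, checkpoints every 100 steps, and evaluation at each epoch boundary) matches what a TrainingArguments setup along the following lines would produce. These values are inferred from the log history above, not read from a config in this commit, so treat this as a hypothetical sketch.

```python
# Hypothetical TrainingArguments consistent with the logged schedule; all values
# are inferred from trainer_state.json in this commit, not from an actual config.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="output",          # checkpoints were saved under .\output\checkpoint-*
    num_train_epochs=3,
    learning_rate=3e-5,           # inferred peak learning rate
    warmup_ratio=0.1,             # ~45 of 450 steps, matching the logged ramp-up
    lr_scheduler_type="linear",
    logging_steps=10,
    save_steps=100,
    evaluation_strategy="epoch",  # eval logged at steps 150, 300, 450
)
```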