mgh6 committed

Commit: f2bfd73
Parent(s): 618bcb0

Training in progress, step 1000

config.json CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "facebook/esm2_t12_35M_UR50D",
+  "_name_or_path": "mgh6/TCS_MLM_All",
   "architectures": [
     "EsmForMaskedLM"
   ],
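For context (not part of the commit): the only change above is that `_name_or_path` now records `mgh6/TCS_MLM_All`, the checkpoint this run was initialized from, instead of the `facebook/esm2_t12_35M_UR50D` base model. A minimal sketch of loading that checkpoint with `transformers`, assuming the repo id is publicly readable:

```python
# Minimal sketch (not part of the commit). The diff only swaps "_name_or_path", i.e. the
# checkpoint this config was initialized from. Assumes "mgh6/TCS_MLM_All" is readable.
from transformers import AutoConfig, AutoModelForMaskedLM, AutoTokenizer

config = AutoConfig.from_pretrained("mgh6/TCS_MLM_All")
print(config.architectures)  # ["EsmForMaskedLM"], per the unchanged lines above

tokenizer = AutoTokenizer.from_pretrained("mgh6/TCS_MLM_All")
model = AutoModelForMaskedLM.from_pretrained("mgh6/TCS_MLM_All")
```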
last-checkpoint/.ipynb_checkpoints/trainer_state-checkpoint.json ADDED
@@ -0,0 +1,901 @@
+ {
+ "best_metric": null,
+ "best_model_checkpoint": null,
+ "epoch": 29.868722148620982,
+ "eval_steps": 500,
+ "global_step": 124000,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 0.24087679152113695,
+ "grad_norm": 0.5082331299781799,
+ "learning_rate": 0.000991969806472336,
+ "loss": 1.4852,
+ "step": 1000
+ },
+ {
+ "epoch": 0.4817535830422739,
+ "grad_norm": 0.4949457347393036,
+ "learning_rate": 0.0009839396129446719,
+ "loss": 1.369,
+ "step": 2000
+ },
+ {
+ "epoch": 0.7226303745634108,
+ "grad_norm": 0.5630651116371155,
+ "learning_rate": 0.0009759094194170079,
+ "loss": 1.3189,
+ "step": 3000
+ },
+ {
+ "epoch": 0.9635071660845478,
+ "grad_norm": 0.5108479261398315,
+ "learning_rate": 0.000967879225889344,
+ "loss": 1.2817,
+ "step": 4000
+ },
+ {
+ "epoch": 1.2043839576056847,
+ "grad_norm": 0.47163671255111694,
+ "learning_rate": 0.00095984903236168,
+ "loss": 1.2511,
+ "step": 5000
+ },
+ {
+ "epoch": 1.4452607491268217,
+ "grad_norm": 0.531821072101593,
+ "learning_rate": 0.0009518188388340159,
+ "loss": 1.2309,
+ "step": 6000
+ },
+ {
+ "epoch": 1.6861375406479586,
+ "grad_norm": 0.4891831874847412,
+ "learning_rate": 0.0009437886453063518,
+ "loss": 1.2088,
+ "step": 7000
+ },
+ {
+ "epoch": 1.9270143321690956,
+ "grad_norm": 0.4778994619846344,
+ "learning_rate": 0.0009357584517786879,
+ "loss": 1.1936,
+ "step": 8000
+ },
+ {
+ "epoch": 2.1678911236902323,
+ "grad_norm": 0.5436965823173523,
+ "learning_rate": 0.0009277282582510239,
+ "loss": 1.1792,
+ "step": 9000
+ },
+ {
+ "epoch": 2.4087679152113695,
+ "grad_norm": 0.45789214968681335,
+ "learning_rate": 0.0009196980647233599,
+ "loss": 1.1609,
+ "step": 10000
+ },
+ {
+ "epoch": 2.649644706732506,
+ "grad_norm": 0.49292871356010437,
+ "learning_rate": 0.0009116678711956958,
+ "loss": 1.1465,
+ "step": 11000
+ },
+ {
+ "epoch": 2.8905214982536434,
+ "grad_norm": 0.4877796769142151,
+ "learning_rate": 0.0009036376776680318,
+ "loss": 1.1339,
+ "step": 12000
+ },
+ {
+ "epoch": 3.13139828977478,
+ "grad_norm": 0.45624956488609314,
+ "learning_rate": 0.0008956074841403679,
+ "loss": 1.1235,
+ "step": 13000
+ },
+ {
+ "epoch": 3.3722750812959172,
+ "grad_norm": 0.45705732703208923,
+ "learning_rate": 0.0008875772906127038,
+ "loss": 1.114,
+ "step": 14000
+ },
+ {
+ "epoch": 3.613151872817054,
+ "grad_norm": 0.4534723460674286,
+ "learning_rate": 0.0008795470970850398,
+ "loss": 1.1022,
+ "step": 15000
+ },
+ {
+ "epoch": 3.854028664338191,
+ "grad_norm": 0.5117238163948059,
+ "learning_rate": 0.0008715169035573758,
+ "loss": 1.0936,
+ "step": 16000
+ },
+ {
+ "epoch": 4.094905455859328,
+ "grad_norm": 0.4832773804664612,
+ "learning_rate": 0.0008634867100297117,
+ "loss": 1.0895,
+ "step": 17000
+ },
+ {
+ "epoch": 4.335782247380465,
+ "grad_norm": 0.5012445449829102,
+ "learning_rate": 0.0008554565165020477,
+ "loss": 1.0793,
+ "step": 18000
+ },
+ {
+ "epoch": 4.576659038901602,
+ "grad_norm": 0.5131984353065491,
+ "learning_rate": 0.0008474263229743838,
+ "loss": 1.0737,
+ "step": 19000
+ },
+ {
+ "epoch": 4.817535830422739,
+ "grad_norm": 0.5085521340370178,
+ "learning_rate": 0.0008393961294467197,
+ "loss": 1.068,
+ "step": 20000
+ },
+ {
+ "epoch": 5.058412621943876,
+ "grad_norm": 0.4861578345298767,
+ "learning_rate": 0.0008313659359190556,
+ "loss": 1.0596,
+ "step": 21000
+ },
+ {
+ "epoch": 5.299289413465012,
+ "grad_norm": 0.4493337869644165,
+ "learning_rate": 0.0008233357423913917,
+ "loss": 1.0549,
+ "step": 22000
+ },
+ {
+ "epoch": 5.54016620498615,
+ "grad_norm": 0.5158481001853943,
+ "learning_rate": 0.0008153055488637277,
+ "loss": 1.0501,
+ "step": 23000
+ },
+ {
+ "epoch": 5.781042996507287,
+ "grad_norm": 0.5318390130996704,
+ "learning_rate": 0.0008072753553360636,
+ "loss": 1.0423,
+ "step": 24000
+ },
+ {
+ "epoch": 6.021919788028423,
+ "grad_norm": 0.4411888122558594,
+ "learning_rate": 0.0007992451618083995,
+ "loss": 1.0329,
+ "step": 25000
+ },
+ {
+ "epoch": 6.26279657954956,
+ "grad_norm": 0.5529501438140869,
+ "learning_rate": 0.0007912149682807356,
+ "loss": 1.0304,
+ "step": 26000
+ },
+ {
+ "epoch": 6.503673371070697,
+ "grad_norm": 0.4464714229106903,
+ "learning_rate": 0.0007831847747530716,
+ "loss": 1.0246,
+ "step": 27000
+ },
+ {
+ "epoch": 6.7445501625918345,
+ "grad_norm": 0.5307004451751709,
+ "learning_rate": 0.0007751545812254074,
+ "loss": 1.0229,
+ "step": 28000
+ },
+ {
+ "epoch": 6.985426954112971,
+ "grad_norm": 0.5123757123947144,
+ "learning_rate": 0.0007671243876977435,
+ "loss": 1.0172,
+ "step": 29000
+ },
+ {
+ "epoch": 7.226303745634108,
+ "grad_norm": 0.47260963916778564,
+ "learning_rate": 0.0007590941941700795,
+ "loss": 1.0122,
+ "step": 30000
+ },
+ {
+ "epoch": 7.4671805371552455,
+ "grad_norm": 0.5172567963600159,
+ "learning_rate": 0.0007510640006424156,
+ "loss": 1.0041,
+ "step": 31000
+ },
+ {
+ "epoch": 7.708057328676382,
+ "grad_norm": 0.5100764036178589,
+ "learning_rate": 0.0007430338071147515,
+ "loss": 0.9996,
+ "step": 32000
+ },
+ {
+ "epoch": 7.948934120197519,
+ "grad_norm": 0.5113524198532104,
+ "learning_rate": 0.0007350036135870874,
+ "loss": 0.9962,
+ "step": 33000
+ },
+ {
+ "epoch": 8.189810911718656,
+ "grad_norm": 0.5284003615379333,
+ "learning_rate": 0.0007269734200594235,
+ "loss": 0.9902,
+ "step": 34000
+ },
+ {
+ "epoch": 8.430687703239792,
+ "grad_norm": 0.45138561725616455,
+ "learning_rate": 0.0007189432265317595,
+ "loss": 0.9853,
+ "step": 35000
+ },
+ {
+ "epoch": 8.67156449476093,
+ "grad_norm": 0.5026872158050537,
+ "learning_rate": 0.0007109130330040954,
+ "loss": 0.9817,
+ "step": 36000
+ },
+ {
+ "epoch": 8.912441286282068,
+ "grad_norm": 0.553321123123169,
+ "learning_rate": 0.0007028828394764313,
+ "loss": 0.9842,
+ "step": 37000
+ },
+ {
+ "epoch": 9.153318077803204,
+ "grad_norm": 0.4765004515647888,
+ "learning_rate": 0.0006948526459487674,
+ "loss": 0.9748,
+ "step": 38000
+ },
+ {
+ "epoch": 9.394194869324341,
+ "grad_norm": 0.4472289979457855,
+ "learning_rate": 0.0006868224524211034,
+ "loss": 0.9734,
+ "step": 39000
+ },
+ {
+ "epoch": 9.635071660845478,
+ "grad_norm": 0.4811370372772217,
+ "learning_rate": 0.0006787922588934393,
+ "loss": 0.9671,
+ "step": 40000
+ },
+ {
+ "epoch": 9.875948452366615,
+ "grad_norm": 0.5080583691596985,
+ "learning_rate": 0.0006707620653657753,
+ "loss": 0.9687,
+ "step": 41000
+ },
+ {
+ "epoch": 10.116825243887751,
+ "grad_norm": 0.49223220348358154,
+ "learning_rate": 0.0006627318718381113,
+ "loss": 0.9592,
+ "step": 42000
+ },
+ {
+ "epoch": 10.357702035408888,
+ "grad_norm": 0.5603600740432739,
+ "learning_rate": 0.0006547016783104472,
+ "loss": 0.9559,
+ "step": 43000
+ },
+ {
+ "epoch": 10.598578826930025,
+ "grad_norm": 0.503847599029541,
+ "learning_rate": 0.0006466714847827833,
+ "loss": 0.9533,
+ "step": 44000
+ },
+ {
+ "epoch": 10.839455618451161,
+ "grad_norm": 0.4978269934654236,
+ "learning_rate": 0.0006386412912551193,
+ "loss": 0.9486,
+ "step": 45000
+ },
+ {
+ "epoch": 11.0803324099723,
+ "grad_norm": 0.5506151914596558,
+ "learning_rate": 0.0006306110977274552,
+ "loss": 0.951,
+ "step": 46000
+ },
+ {
+ "epoch": 11.321209201493437,
+ "grad_norm": 0.5171232223510742,
+ "learning_rate": 0.0006225809041997912,
+ "loss": 0.9399,
+ "step": 47000
+ },
+ {
+ "epoch": 11.562085993014573,
+ "grad_norm": 0.656745970249176,
+ "learning_rate": 0.0006145507106721272,
+ "loss": 0.937,
+ "step": 48000
+ },
+ {
+ "epoch": 11.80296278453571,
+ "grad_norm": 0.5088077783584595,
+ "learning_rate": 0.0006065205171444633,
+ "loss": 0.9376,
+ "step": 49000
+ },
+ {
+ "epoch": 12.043839576056847,
+ "grad_norm": 0.4850046932697296,
+ "learning_rate": 0.0005984903236167992,
+ "loss": 0.9302,
+ "step": 50000
+ },
+ {
+ "epoch": 12.284716367577984,
+ "grad_norm": 0.5553488731384277,
+ "learning_rate": 0.0005904601300891351,
+ "loss": 0.9302,
+ "step": 51000
+ },
+ {
+ "epoch": 12.52559315909912,
+ "grad_norm": 0.47318387031555176,
+ "learning_rate": 0.0005824299365614711,
+ "loss": 0.9298,
+ "step": 52000
+ },
+ {
+ "epoch": 12.766469950620257,
+ "grad_norm": 0.6019132733345032,
+ "learning_rate": 0.0005743997430338072,
+ "loss": 0.9241,
+ "step": 53000
+ },
+ {
+ "epoch": 13.007346742141396,
+ "grad_norm": 0.49550944566726685,
+ "learning_rate": 0.000566369549506143,
+ "loss": 0.9236,
+ "step": 54000
+ },
+ {
+ "epoch": 13.248223533662532,
+ "grad_norm": 0.5007643103599548,
+ "learning_rate": 0.000558339355978479,
+ "loss": 0.9196,
+ "step": 55000
+ },
+ {
+ "epoch": 13.489100325183669,
+ "grad_norm": 0.5525193810462952,
+ "learning_rate": 0.0005503091624508151,
+ "loss": 0.9113,
+ "step": 56000
+ },
+ {
+ "epoch": 13.729977116704806,
+ "grad_norm": 0.5109050273895264,
+ "learning_rate": 0.0005422789689231511,
+ "loss": 0.9077,
+ "step": 57000
+ },
+ {
+ "epoch": 13.970853908225942,
+ "grad_norm": 0.5197868347167969,
+ "learning_rate": 0.000534248775395487,
+ "loss": 0.9076,
+ "step": 58000
+ },
+ {
+ "epoch": 14.21173069974708,
+ "grad_norm": 0.4690844416618347,
+ "learning_rate": 0.000526218581867823,
+ "loss": 0.9097,
+ "step": 59000
+ },
+ {
+ "epoch": 14.452607491268216,
+ "grad_norm": 0.5010888576507568,
+ "learning_rate": 0.000518188388340159,
+ "loss": 0.9022,
+ "step": 60000
+ },
+ {
+ "epoch": 14.693484282789353,
+ "grad_norm": 0.5394883751869202,
+ "learning_rate": 0.000510158194812495,
+ "loss": 0.8982,
+ "step": 61000
+ },
+ {
+ "epoch": 14.934361074310491,
+ "grad_norm": 0.5398752689361572,
+ "learning_rate": 0.000502128001284831,
+ "loss": 0.8982,
+ "step": 62000
+ },
+ {
+ "epoch": 15.175237865831628,
+ "grad_norm": 0.48452773690223694,
+ "learning_rate": 0.0004940978077571669,
+ "loss": 0.8937,
+ "step": 63000
+ },
+ {
+ "epoch": 15.416114657352765,
+ "grad_norm": 0.5147862434387207,
+ "learning_rate": 0.00048606761422950295,
+ "loss": 0.894,
+ "step": 64000
+ },
+ {
+ "epoch": 15.656991448873901,
+ "grad_norm": 0.5301661491394043,
+ "learning_rate": 0.00047803742070183893,
+ "loss": 0.8865,
+ "step": 65000
+ },
+ {
+ "epoch": 15.897868240395038,
+ "grad_norm": 0.49967125058174133,
+ "learning_rate": 0.0004700072271741749,
+ "loss": 0.8825,
+ "step": 66000
+ },
+ {
+ "epoch": 16.138745031916176,
+ "grad_norm": 0.4977249801158905,
+ "learning_rate": 0.0004619770336465109,
+ "loss": 0.884,
+ "step": 67000
+ },
+ {
+ "epoch": 16.37962182343731,
+ "grad_norm": 0.5272189378738403,
+ "learning_rate": 0.0004539468401188468,
+ "loss": 0.8799,
+ "step": 68000
+ },
+ {
+ "epoch": 16.62049861495845,
+ "grad_norm": 0.5125630497932434,
+ "learning_rate": 0.00044591664659118286,
+ "loss": 0.8809,
+ "step": 69000
+ },
+ {
+ "epoch": 16.861375406479585,
+ "grad_norm": 0.4780360460281372,
+ "learning_rate": 0.00043788645306351885,
+ "loss": 0.8744,
+ "step": 70000
+ },
+ {
+ "epoch": 17.102252198000723,
+ "grad_norm": 0.541357159614563,
+ "learning_rate": 0.00042985625953585483,
+ "loss": 0.8729,
+ "step": 71000
+ },
+ {
+ "epoch": 17.34312898952186,
+ "grad_norm": 0.5186867713928223,
+ "learning_rate": 0.0004218260660081908,
+ "loss": 0.8673,
+ "step": 72000
+ },
+ {
+ "epoch": 17.584005781042997,
+ "grad_norm": 0.5490289330482483,
+ "learning_rate": 0.0004137958724805268,
+ "loss": 0.8644,
+ "step": 73000
+ },
+ {
+ "epoch": 17.824882572564132,
+ "grad_norm": 0.5589401125907898,
+ "learning_rate": 0.0004057656789528628,
+ "loss": 0.8676,
+ "step": 74000
+ },
+ {
+ "epoch": 18.06575936408527,
+ "grad_norm": 0.5271314382553101,
+ "learning_rate": 0.00039773548542519876,
+ "loss": 0.861,
+ "step": 75000
+ },
+ {
+ "epoch": 18.30663615560641,
+ "grad_norm": 0.586135983467102,
+ "learning_rate": 0.00038970529189753474,
+ "loss": 0.8556,
+ "step": 76000
+ },
+ {
+ "epoch": 18.547512947127544,
+ "grad_norm": 0.5827994346618652,
+ "learning_rate": 0.00038167509836987073,
+ "loss": 0.8565,
+ "step": 77000
+ },
+ {
+ "epoch": 18.788389738648682,
+ "grad_norm": 0.5451443195343018,
+ "learning_rate": 0.0003736449048422067,
+ "loss": 0.8554,
+ "step": 78000
+ },
+ {
+ "epoch": 19.029266530169817,
+ "grad_norm": 0.6020991206169128,
+ "learning_rate": 0.0003656147113145427,
+ "loss": 0.8506,
+ "step": 79000
+ },
+ {
+ "epoch": 19.270143321690956,
+ "grad_norm": 0.48624420166015625,
+ "learning_rate": 0.0003575845177868787,
+ "loss": 0.8449,
+ "step": 80000
+ },
+ {
+ "epoch": 19.51102011321209,
+ "grad_norm": 0.6073954105377197,
+ "learning_rate": 0.0003495543242592146,
+ "loss": 0.8436,
+ "step": 81000
+ },
+ {
+ "epoch": 19.75189690473323,
+ "grad_norm": 0.5890400409698486,
+ "learning_rate": 0.00034152413073155064,
+ "loss": 0.8445,
+ "step": 82000
+ },
+ {
+ "epoch": 19.992773696254368,
+ "grad_norm": 0.5391818881034851,
+ "learning_rate": 0.0003334939372038866,
+ "loss": 0.8405,
+ "step": 83000
+ },
+ {
+ "epoch": 20.233650487775503,
+ "grad_norm": 0.5576732158660889,
+ "learning_rate": 0.0003254637436762226,
+ "loss": 0.8315,
+ "step": 84000
+ },
+ {
+ "epoch": 20.47452727929664,
+ "grad_norm": 0.5405558347702026,
+ "learning_rate": 0.0003174335501485586,
+ "loss": 0.8348,
+ "step": 85000
+ },
+ {
+ "epoch": 20.715404070817776,
+ "grad_norm": 0.5441027879714966,
+ "learning_rate": 0.0003094033566208946,
+ "loss": 0.8326,
+ "step": 86000
+ },
+ {
+ "epoch": 20.956280862338915,
+ "grad_norm": 0.5216940641403198,
+ "learning_rate": 0.00030137316309323056,
+ "loss": 0.8251,
+ "step": 87000
+ },
+ {
+ "epoch": 21.19715765386005,
+ "grad_norm": 0.6281733512878418,
+ "learning_rate": 0.00029334296956556654,
+ "loss": 0.8219,
+ "step": 88000
+ },
+ {
+ "epoch": 21.438034445381188,
+ "grad_norm": 0.6415626406669617,
+ "learning_rate": 0.0002853127760379025,
+ "loss": 0.8225,
+ "step": 89000
+ },
+ {
+ "epoch": 21.678911236902323,
+ "grad_norm": 0.5944454073905945,
+ "learning_rate": 0.0002772825825102385,
+ "loss": 0.8223,
+ "step": 90000
+ },
+ {
+ "epoch": 21.91978802842346,
+ "grad_norm": 0.5434209704399109,
+ "learning_rate": 0.0002692523889825745,
+ "loss": 0.8161,
+ "step": 91000
+ },
+ {
+ "epoch": 22.1606648199446,
+ "grad_norm": 0.5393619537353516,
+ "learning_rate": 0.0002612221954549105,
+ "loss": 0.8126,
+ "step": 92000
+ },
+ {
+ "epoch": 22.401541611465735,
+ "grad_norm": 0.6141464710235596,
+ "learning_rate": 0.00025319200192724646,
+ "loss": 0.8119,
+ "step": 93000
+ },
+ {
+ "epoch": 22.642418402986873,
+ "grad_norm": 0.6085337400436401,
+ "learning_rate": 0.00024516180839958244,
+ "loss": 0.8082,
+ "step": 94000
+ },
+ {
+ "epoch": 22.88329519450801,
+ "grad_norm": 0.6050975322723389,
+ "learning_rate": 0.00023713161487191843,
+ "loss": 0.8065,
+ "step": 95000
+ },
+ {
+ "epoch": 23.124171986029147,
+ "grad_norm": 0.5710690021514893,
+ "learning_rate": 0.0002291014213442544,
+ "loss": 0.7971,
+ "step": 96000
+ },
+ {
+ "epoch": 23.36504877755028,
+ "grad_norm": 0.5681021213531494,
+ "learning_rate": 0.0002210712278165904,
+ "loss": 0.7996,
+ "step": 97000
+ },
+ {
+ "epoch": 23.60592556907142,
+ "grad_norm": 0.5795422792434692,
+ "learning_rate": 0.00021304103428892638,
+ "loss": 0.8006,
+ "step": 98000
+ },
+ {
+ "epoch": 23.846802360592555,
+ "grad_norm": 0.6810296773910522,
+ "learning_rate": 0.00020501084076126236,
+ "loss": 0.7931,
+ "step": 99000
+ },
+ {
+ "epoch": 24.087679152113694,
+ "grad_norm": 0.5023326277732849,
+ "learning_rate": 0.00019698064723359831,
+ "loss": 0.7895,
+ "step": 100000
+ },
+ {
+ "epoch": 24.328555943634832,
+ "grad_norm": 0.6325027346611023,
+ "learning_rate": 0.0001889504537059343,
+ "loss": 0.7902,
+ "step": 101000
+ },
+ {
+ "epoch": 24.569432735155967,
+ "grad_norm": 0.5804798007011414,
+ "learning_rate": 0.00018092026017827028,
+ "loss": 0.7912,
+ "step": 102000
+ },
+ {
+ "epoch": 24.810309526677106,
+ "grad_norm": 0.5688096284866333,
+ "learning_rate": 0.00017289006665060626,
+ "loss": 0.7886,
+ "step": 103000
+ },
+ {
+ "epoch": 25.05118631819824,
+ "grad_norm": 0.5234955549240112,
+ "learning_rate": 0.00016485987312294225,
+ "loss": 0.7828,
+ "step": 104000
+ },
+ {
+ "epoch": 25.29206310971938,
+ "grad_norm": 0.5501936078071594,
+ "learning_rate": 0.00015682967959527826,
+ "loss": 0.7769,
+ "step": 105000
+ },
+ {
+ "epoch": 25.532939901240514,
+ "grad_norm": 0.6095595359802246,
+ "learning_rate": 0.00014879948606761424,
+ "loss": 0.7788,
+ "step": 106000
+ },
+ {
+ "epoch": 25.773816692761653,
+ "grad_norm": 0.6600815057754517,
+ "learning_rate": 0.00014076929253995022,
+ "loss": 0.778,
+ "step": 107000
+ },
+ {
+ "epoch": 26.01469348428279,
+ "grad_norm": 0.6249046921730042,
+ "learning_rate": 0.0001327390990122862,
+ "loss": 0.7701,
+ "step": 108000
+ },
+ {
+ "epoch": 26.255570275803926,
+ "grad_norm": 0.7678042054176331,
+ "learning_rate": 0.0001247089054846222,
+ "loss": 0.7661,
+ "step": 109000
+ },
+ {
+ "epoch": 26.496447067325064,
+ "grad_norm": 0.5331607460975647,
+ "learning_rate": 0.00011667871195695816,
+ "loss": 0.7689,
+ "step": 110000
+ },
+ {
+ "epoch": 26.7373238588462,
+ "grad_norm": 0.6330080628395081,
+ "learning_rate": 0.00010864851842929414,
+ "loss": 0.7655,
+ "step": 111000
+ },
+ {
+ "epoch": 26.978200650367338,
+ "grad_norm": 0.6966120004653931,
+ "learning_rate": 0.00010061832490163013,
+ "loss": 0.7593,
+ "step": 112000
+ },
+ {
+ "epoch": 27.219077441888473,
+ "grad_norm": 0.6162911653518677,
+ "learning_rate": 9.258813137396611e-05,
+ "loss": 0.7619,
+ "step": 113000
+ },
+ {
+ "epoch": 27.45995423340961,
+ "grad_norm": 0.6430843472480774,
+ "learning_rate": 8.45579378463021e-05,
+ "loss": 0.7598,
+ "step": 114000
+ },
+ {
+ "epoch": 27.700831024930746,
+ "grad_norm": 0.6248263120651245,
+ "learning_rate": 7.652774431863809e-05,
+ "loss": 0.7584,
+ "step": 115000
+ },
+ {
+ "epoch": 27.941707816451885,
+ "grad_norm": 0.6525952219963074,
+ "learning_rate": 6.849755079097407e-05,
+ "loss": 0.7536,
+ "step": 116000
+ },
+ {
+ "epoch": 28.182584607973023,
+ "grad_norm": 0.64836186170578,
+ "learning_rate": 6.046735726331005e-05,
+ "loss": 0.7535,
+ "step": 117000
+ },
+ {
+ "epoch": 28.42346139949416,
+ "grad_norm": 0.5937727689743042,
+ "learning_rate": 5.243716373564603e-05,
+ "loss": 0.7453,
+ "step": 118000
+ },
+ {
+ "epoch": 28.664338191015297,
+ "grad_norm": 0.5951708555221558,
+ "learning_rate": 4.440697020798201e-05,
+ "loss": 0.7462,
+ "step": 119000
+ },
+ {
+ "epoch": 28.90521498253643,
+ "grad_norm": 0.5529988408088684,
+ "learning_rate": 3.6376776680318e-05,
+ "loss": 0.7478,
+ "step": 120000
+ },
+ {
+ "epoch": 29.14609177405757,
+ "grad_norm": 0.5422804355621338,
+ "learning_rate": 2.834658315265398e-05,
+ "loss": 0.7454,
+ "step": 121000
+ },
+ {
+ "epoch": 29.386968565578705,
+ "grad_norm": 0.5853854417800903,
+ "learning_rate": 2.0316389624989963e-05,
+ "loss": 0.7424,
+ "step": 122000
+ },
+ {
+ "epoch": 29.627845357099844,
+ "grad_norm": 0.6154918074607849,
+ "learning_rate": 1.2286196097325946e-05,
+ "loss": 0.7431,
+ "step": 123000
+ },
+ {
+ "epoch": 29.868722148620982,
+ "grad_norm": 0.596747875213623,
+ "learning_rate": 4.256002569661929e-06,
+ "loss": 0.7426,
+ "step": 124000
+ }
+ ],
+ "logging_steps": 1000,
+ "max_steps": 124530,
+ "num_input_tokens_seen": 0,
+ "num_train_epochs": 30,
+ "save_steps": 1000,
+ "stateful_callbacks": {
+ "TrainerControl": {
+ "args": {
+ "should_epoch_stop": false,
+ "should_evaluate": false,
+ "should_log": false,
+ "should_save": true,
+ "should_training_stop": false
+ },
+ "attributes": {}
+ }
+ },
+ "total_flos": 1.2404512265548595e+18,
+ "train_batch_size": 64,
+ "trial_name": null,
+ "trial_params": null
+ }
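The JSON above is the Trainer's bookkeeping for this checkpoint: 124 log points at `logging_steps` of 1000, roughly 29.87 epochs of a 30-epoch run. A minimal sketch of summarizing it locally, assuming a clone of the repo that contains this checkpoint file:

```python
# Minimal sketch (assumption: run inside a local clone containing this checkpoint file).
import json

path = "last-checkpoint/.ipynb_checkpoints/trainer_state-checkpoint.json"
with open(path) as f:
    state = json.load(f)

history = state["log_history"]
print(f"{len(history)} log points, epoch {state['epoch']:.2f} of {state['num_train_epochs']}")
for entry in history[::10]:  # print every 10th log point
    print(f"step {entry['step']:>6}  loss {entry['loss']:.4f}  lr {entry['learning_rate']:.2e}")
```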
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:68dfc7560d6b33287f16f326521b62bf70702c1316c8245d6e16f48e1915b6af
+oid sha256:e5970541f7e1cd8bf63e9b88cdc475f6f50aac2e530fb1c43f2709a48d0b99b1
 size 136000488
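`model.safetensors` is tracked with Git LFS, so the diff above only replaces the pointer file (new `oid`, same 136,000,488-byte size). A minimal sketch, not part of the commit, for checking that a downloaded copy matches the new pointer:

```python
# Minimal sketch: verify a local model.safetensors against the LFS pointer shown above
# (oid = sha256 of the blob, size in bytes).
import hashlib

EXPECTED_OID = "e5970541f7e1cd8bf63e9b88cdc475f6f50aac2e530fb1c43f2709a48d0b99b1"
EXPECTED_SIZE = 136000488

sha = hashlib.sha256()
size = 0
with open("model.safetensors", "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        sha.update(chunk)
        size += len(chunk)

assert size == EXPECTED_SIZE and sha.hexdigest() == EXPECTED_OID
print("model.safetensors matches the LFS pointer")
```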
runs/Jun03_03-45-38_training-full-0-0/events.out.tfevents.1717386343.training-full-0-0.13623.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4b7315b670cebb2d8525e6836d14ccc3e5ca931585420b3ddc3889b8efa62ead
+size 4937
runs/Jun03_03-54-04_training-full-0-0/events.out.tfevents.1717386848.training-full-0-0.14978.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dbdc7a0e7a985c400003e7a74410dd6d356972600e642505a9802cf61a147603
+size 4942
runs/Jun03_03-56-12_training-full-0-0/events.out.tfevents.1717386973.training-full-0-0.15426.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:52c502959feee615567acfdf6b4d514d9d57502d64f51ad3799fb687d59012b8
+size 4943
runs/Jun03_03-58-12_training-full-0-0/events.out.tfevents.1717387094.training-full-0-0.16021.0 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ffbecf82c551a95ec81faf27bc4bfc4ab2d4b56ca875907180dcc952904293d
+size 5154
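The four `runs/…/events.out.tfevents.*` files are TensorBoard event logs (also stored as LFS pointers). A minimal sketch for inspecting one of them locally; the scalar tag names are an assumption, since they are not visible in this diff:

```python
# Minimal sketch (assumption: the event files have been pulled locally via git-lfs).
from tensorboard.backend.event_processing.event_accumulator import EventAccumulator

acc = EventAccumulator("runs/Jun03_03-58-12_training-full-0-0")
acc.Reload()
scalar_tags = acc.Tags()["scalars"]
print(scalar_tags)  # actual tag names depend on the Trainer's TensorBoard callback
for event in acc.Scalars(scalar_tags[0]):
    print(event.step, event.value)
```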
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:0e18ec5730a38ec86822609eef175321578f30afb709c8cd04e2b0522a0e28c7
+oid sha256:1b19c50ebceb96bf9ab86b4ad80b203101b877c903c2c529277d18a83643e787
 size 5112
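`training_args.bin` is the pickled `TrainingArguments` object that the Trainer saves alongside checkpoints; the pointer change above simply reflects a re-serialized copy. A minimal sketch for inspecting it, assuming you trust the repo (it is a pickle) and have compatible `torch`/`transformers` versions installed:

```python
# Minimal sketch (assumption: trusted source and compatible transformers/torch versions).
import torch
import transformers  # must be installed so the pickled TrainingArguments class resolves

args = torch.load("training_args.bin", weights_only=False)
print(type(args).__name__)
print(args.num_train_epochs, args.per_device_train_batch_size, args.learning_rate)
```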