RaphaelMourad committed on
Commit
9ed7b83
1 Parent(s): 3b5b366

Upload 9 files

Browse files
config.json ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "data/models/Mixtral-8x7B-v0.2-dna",
3
+ "architectures": [
4
+ "MixtralForCausalLM"
5
+ ],
6
+ "attention_dropout": 0.0,
7
+ "bos_token_id": 1,
8
+ "eos_token_id": 2,
9
+ "hidden_act": "silu",
10
+ "hidden_size": 768,
11
+ "initializer_range": 0.02,
12
+ "intermediate_size": 768,
13
+ "max_position_embeddings": 512,
14
+ "model_type": "mixtral",
15
+ "num_attention_heads": 8,
16
+ "num_experts_per_tok": 1,
17
+ "num_hidden_layers": 8,
18
+ "num_key_value_heads": 8,
19
+ "num_local_experts": 8,
20
+ "output_router_logits": false,
21
+ "rms_norm_eps": 1e-05,
22
+ "rope_theta": 1000000.0,
23
+ "router_aux_loss_coef": 0.02,
24
+ "router_jitter_noise": 0.0,
25
+ "sliding_window": null,
26
+ "tie_word_embeddings": false,
27
+ "torch_dtype": "bfloat16",
28
+ "transformers_version": "4.41.1",
29
+ "use_cache": true,
30
+ "vocab_size": 4096
31
+ }
generation_config.json ADDED
@@ -0,0 +1,6 @@
 
 
 
 
 
 
 
1
+ {
2
+ "_from_model_config": true,
3
+ "bos_token_id": 1,
4
+ "eos_token_id": 2,
5
+ "transformers_version": "4.41.1"
6
+ }
model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73768b6de5f3ff03fd576b6d915d746ef56aa3a2c14d0ca08f8f47d75939b945
3
+ size 276979168
rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d245e05e72192c132e0f2edb6fdcae0c578c890f0fe912f17ec7b0bba2d38cc3
3
+ size 14244
scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:22393f8cfc6918428595434364b94b6e95be4bcda53cd002c9bf3a02e3eafd8d
3
+ size 1064
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1 @@
 
 
1
+ {"tokenizer_class": "PreTrainedTokenizerFast", "unk_token": "[UNK]", "cls_token": "[CLS]", "sep_token": "[SEP]", "pad_token": "[PAD]", "mask_token": "[MASK]"}
trainer_state.json ADDED
@@ -0,0 +1,712 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 6.946237564086914,
3
+ "best_model_checkpoint": "./results/models/checkpoint-45320",
4
+ "epoch": 5.0,
5
+ "eval_steps": 500,
6
+ "global_step": 45320,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.05516328331862312,
13
+ "grad_norm": 0.3359375,
14
+ "learning_rate": 0.004994483671668138,
15
+ "loss": 7.1096,
16
+ "step": 500
17
+ },
18
+ {
19
+ "epoch": 0.11032656663724624,
20
+ "grad_norm": 0.2890625,
21
+ "learning_rate": 0.004988967343336276,
22
+ "loss": 6.9824,
23
+ "step": 1000
24
+ },
25
+ {
26
+ "epoch": 0.16548984995586938,
27
+ "grad_norm": 0.369140625,
28
+ "learning_rate": 0.004983451015004413,
29
+ "loss": 6.971,
30
+ "step": 1500
31
+ },
32
+ {
33
+ "epoch": 0.22065313327449249,
34
+ "grad_norm": 0.8046875,
35
+ "learning_rate": 0.004977934686672551,
36
+ "loss": 7.0129,
37
+ "step": 2000
38
+ },
39
+ {
40
+ "epoch": 0.2758164165931156,
41
+ "grad_norm": 4.59375,
42
+ "learning_rate": 0.004972418358340689,
43
+ "loss": 7.0571,
44
+ "step": 2500
45
+ },
46
+ {
47
+ "epoch": 0.33097969991173876,
48
+ "grad_norm": 4.03125,
49
+ "learning_rate": 0.004966902030008827,
50
+ "loss": 7.0532,
51
+ "step": 3000
52
+ },
53
+ {
54
+ "epoch": 0.3861429832303619,
55
+ "grad_norm": 3.75,
56
+ "learning_rate": 0.0049613857016769635,
57
+ "loss": 7.0486,
58
+ "step": 3500
59
+ },
60
+ {
61
+ "epoch": 0.44130626654898497,
62
+ "grad_norm": 9.125,
63
+ "learning_rate": 0.004955869373345101,
64
+ "loss": 7.0385,
65
+ "step": 4000
66
+ },
67
+ {
68
+ "epoch": 0.4964695498676081,
69
+ "grad_norm": 9.1875,
70
+ "learning_rate": 0.00495035304501324,
71
+ "loss": 7.0304,
72
+ "step": 4500
73
+ },
74
+ {
75
+ "epoch": 0.5516328331862312,
76
+ "grad_norm": 12.625,
77
+ "learning_rate": 0.004944836716681377,
78
+ "loss": 7.023,
79
+ "step": 5000
80
+ },
81
+ {
82
+ "epoch": 0.6067961165048543,
83
+ "grad_norm": 8.6875,
84
+ "learning_rate": 0.0049393203883495145,
85
+ "loss": 7.0205,
86
+ "step": 5500
87
+ },
88
+ {
89
+ "epoch": 0.6619593998234775,
90
+ "grad_norm": 7.6875,
91
+ "learning_rate": 0.004933804060017652,
92
+ "loss": 7.025,
93
+ "step": 6000
94
+ },
95
+ {
96
+ "epoch": 0.7171226831421006,
97
+ "grad_norm": 4.96875,
98
+ "learning_rate": 0.00492828773168579,
99
+ "loss": 7.0285,
100
+ "step": 6500
101
+ },
102
+ {
103
+ "epoch": 0.7722859664607238,
104
+ "grad_norm": 5.46875,
105
+ "learning_rate": 0.004922771403353928,
106
+ "loss": 7.025,
107
+ "step": 7000
108
+ },
109
+ {
110
+ "epoch": 0.8274492497793469,
111
+ "grad_norm": 20.125,
112
+ "learning_rate": 0.0049172550750220655,
113
+ "loss": 7.0162,
114
+ "step": 7500
115
+ },
116
+ {
117
+ "epoch": 0.8826125330979699,
118
+ "grad_norm": 7.1875,
119
+ "learning_rate": 0.004911738746690203,
120
+ "loss": 7.0154,
121
+ "step": 8000
122
+ },
123
+ {
124
+ "epoch": 0.9377758164165931,
125
+ "grad_norm": 8.1875,
126
+ "learning_rate": 0.004906222418358341,
127
+ "loss": 7.0104,
128
+ "step": 8500
129
+ },
130
+ {
131
+ "epoch": 0.9929390997352162,
132
+ "grad_norm": 6.71875,
133
+ "learning_rate": 0.004900706090026478,
134
+ "loss": 7.0374,
135
+ "step": 9000
136
+ },
137
+ {
138
+ "epoch": 1.0,
139
+ "eval_loss": 7.0309014320373535,
140
+ "eval_runtime": 9.4135,
141
+ "eval_samples_per_second": 53.115,
142
+ "eval_steps_per_second": 1.7,
143
+ "step": 9064
144
+ },
145
+ {
146
+ "epoch": 1.0481023830538394,
147
+ "grad_norm": 3.5,
148
+ "learning_rate": 0.0048951897616946165,
149
+ "loss": 7.02,
150
+ "step": 9500
151
+ },
152
+ {
153
+ "epoch": 1.1032656663724625,
154
+ "grad_norm": 10.5,
155
+ "learning_rate": 0.004889673433362754,
156
+ "loss": 7.0297,
157
+ "step": 10000
158
+ },
159
+ {
160
+ "epoch": 1.1584289496910856,
161
+ "grad_norm": 8.875,
162
+ "learning_rate": 0.004884157105030891,
163
+ "loss": 7.0361,
164
+ "step": 10500
165
+ },
166
+ {
167
+ "epoch": 1.2135922330097086,
168
+ "grad_norm": 3.5625,
169
+ "learning_rate": 0.004878640776699029,
170
+ "loss": 7.015,
171
+ "step": 11000
172
+ },
173
+ {
174
+ "epoch": 1.268755516328332,
175
+ "grad_norm": 9.1875,
176
+ "learning_rate": 0.0048731244483671676,
177
+ "loss": 6.9996,
178
+ "step": 11500
179
+ },
180
+ {
181
+ "epoch": 1.323918799646955,
182
+ "grad_norm": 13.625,
183
+ "learning_rate": 0.0048676081200353044,
184
+ "loss": 6.9975,
185
+ "step": 12000
186
+ },
187
+ {
188
+ "epoch": 1.379082082965578,
189
+ "grad_norm": 6.34375,
190
+ "learning_rate": 0.004862091791703442,
191
+ "loss": 7.0081,
192
+ "step": 12500
193
+ },
194
+ {
195
+ "epoch": 1.4342453662842012,
196
+ "grad_norm": 14.375,
197
+ "learning_rate": 0.00485657546337158,
198
+ "loss": 7.0008,
199
+ "step": 13000
200
+ },
201
+ {
202
+ "epoch": 1.4894086496028245,
203
+ "grad_norm": 8.8125,
204
+ "learning_rate": 0.004851059135039718,
205
+ "loss": 6.9962,
206
+ "step": 13500
207
+ },
208
+ {
209
+ "epoch": 1.5445719329214476,
210
+ "grad_norm": 22.625,
211
+ "learning_rate": 0.0048455428067078555,
212
+ "loss": 6.9928,
213
+ "step": 14000
214
+ },
215
+ {
216
+ "epoch": 1.5997352162400706,
217
+ "grad_norm": 12.6875,
218
+ "learning_rate": 0.004840026478375993,
219
+ "loss": 6.9919,
220
+ "step": 14500
221
+ },
222
+ {
223
+ "epoch": 1.6548984995586937,
224
+ "grad_norm": 53.75,
225
+ "learning_rate": 0.004834510150044131,
226
+ "loss": 6.9923,
227
+ "step": 15000
228
+ },
229
+ {
230
+ "epoch": 1.7100617828773168,
231
+ "grad_norm": 9.9375,
232
+ "learning_rate": 0.004828993821712269,
233
+ "loss": 6.9966,
234
+ "step": 15500
235
+ },
236
+ {
237
+ "epoch": 1.7652250661959399,
238
+ "grad_norm": 13.0625,
239
+ "learning_rate": 0.004823477493380406,
240
+ "loss": 6.9908,
241
+ "step": 16000
242
+ },
243
+ {
244
+ "epoch": 1.820388349514563,
245
+ "grad_norm": 8.9375,
246
+ "learning_rate": 0.004817961165048544,
247
+ "loss": 6.9937,
248
+ "step": 16500
249
+ },
250
+ {
251
+ "epoch": 1.8755516328331863,
252
+ "grad_norm": 42.0,
253
+ "learning_rate": 0.004812444836716681,
254
+ "loss": 6.9894,
255
+ "step": 17000
256
+ },
257
+ {
258
+ "epoch": 1.9307149161518093,
259
+ "grad_norm": 17.5,
260
+ "learning_rate": 0.004806928508384819,
261
+ "loss": 6.9926,
262
+ "step": 17500
263
+ },
264
+ {
265
+ "epoch": 1.9858781994704324,
266
+ "grad_norm": 30.75,
267
+ "learning_rate": 0.004801412180052957,
268
+ "loss": 6.9989,
269
+ "step": 18000
270
+ },
271
+ {
272
+ "epoch": 2.0,
273
+ "eval_loss": 6.996521472930908,
274
+ "eval_runtime": 8.6549,
275
+ "eval_samples_per_second": 57.771,
276
+ "eval_steps_per_second": 1.849,
277
+ "step": 18128
278
+ },
279
+ {
280
+ "epoch": 2.0410414827890557,
281
+ "grad_norm": 12.125,
282
+ "learning_rate": 0.004795895851721094,
283
+ "loss": 6.9915,
284
+ "step": 18500
285
+ },
286
+ {
287
+ "epoch": 2.096204766107679,
288
+ "grad_norm": 18.625,
289
+ "learning_rate": 0.004790379523389232,
290
+ "loss": 6.9839,
291
+ "step": 19000
292
+ },
293
+ {
294
+ "epoch": 2.151368049426302,
295
+ "grad_norm": 21.25,
296
+ "learning_rate": 0.00478486319505737,
297
+ "loss": 6.9821,
298
+ "step": 19500
299
+ },
300
+ {
301
+ "epoch": 2.206531332744925,
302
+ "grad_norm": 21.375,
303
+ "learning_rate": 0.004779346866725508,
304
+ "loss": 6.9842,
305
+ "step": 20000
306
+ },
307
+ {
308
+ "epoch": 2.261694616063548,
309
+ "grad_norm": 16.625,
310
+ "learning_rate": 0.004773830538393645,
311
+ "loss": 6.9836,
312
+ "step": 20500
313
+ },
314
+ {
315
+ "epoch": 2.316857899382171,
316
+ "grad_norm": 17.125,
317
+ "learning_rate": 0.004768314210061783,
318
+ "loss": 6.9837,
319
+ "step": 21000
320
+ },
321
+ {
322
+ "epoch": 2.372021182700794,
323
+ "grad_norm": 8.9375,
324
+ "learning_rate": 0.004762797881729921,
325
+ "loss": 6.9822,
326
+ "step": 21500
327
+ },
328
+ {
329
+ "epoch": 2.4271844660194173,
330
+ "grad_norm": 8.25,
331
+ "learning_rate": 0.004757281553398059,
332
+ "loss": 6.9791,
333
+ "step": 22000
334
+ },
335
+ {
336
+ "epoch": 2.4823477493380404,
337
+ "grad_norm": 8.375,
338
+ "learning_rate": 0.0047517652250661955,
339
+ "loss": 6.986,
340
+ "step": 22500
341
+ },
342
+ {
343
+ "epoch": 2.537511032656664,
344
+ "grad_norm": 10.6875,
345
+ "learning_rate": 0.004746248896734333,
346
+ "loss": 6.9807,
347
+ "step": 23000
348
+ },
349
+ {
350
+ "epoch": 2.592674315975287,
351
+ "grad_norm": 14.375,
352
+ "learning_rate": 0.004740732568402472,
353
+ "loss": 6.9822,
354
+ "step": 23500
355
+ },
356
+ {
357
+ "epoch": 2.64783759929391,
358
+ "grad_norm": 10.625,
359
+ "learning_rate": 0.004735216240070609,
360
+ "loss": 6.9701,
361
+ "step": 24000
362
+ },
363
+ {
364
+ "epoch": 2.703000882612533,
365
+ "grad_norm": 11.75,
366
+ "learning_rate": 0.0047296999117387465,
367
+ "loss": 6.9707,
368
+ "step": 24500
369
+ },
370
+ {
371
+ "epoch": 2.758164165931156,
372
+ "grad_norm": 30.375,
373
+ "learning_rate": 0.004724183583406884,
374
+ "loss": 6.9792,
375
+ "step": 25000
376
+ },
377
+ {
378
+ "epoch": 2.8133274492497793,
379
+ "grad_norm": 17.25,
380
+ "learning_rate": 0.004718667255075022,
381
+ "loss": 6.9803,
382
+ "step": 25500
383
+ },
384
+ {
385
+ "epoch": 2.8684907325684024,
386
+ "grad_norm": 9.0,
387
+ "learning_rate": 0.00471315092674316,
388
+ "loss": 6.9752,
389
+ "step": 26000
390
+ },
391
+ {
392
+ "epoch": 2.9236540158870254,
393
+ "grad_norm": 6.0,
394
+ "learning_rate": 0.0047076345984112975,
395
+ "loss": 6.9687,
396
+ "step": 26500
397
+ },
398
+ {
399
+ "epoch": 2.978817299205649,
400
+ "grad_norm": 12.0625,
401
+ "learning_rate": 0.004702118270079435,
402
+ "loss": 6.9635,
403
+ "step": 27000
404
+ },
405
+ {
406
+ "epoch": 3.0,
407
+ "eval_loss": 6.960501194000244,
408
+ "eval_runtime": 8.9893,
409
+ "eval_samples_per_second": 55.622,
410
+ "eval_steps_per_second": 1.78,
411
+ "step": 27192
412
+ },
413
+ {
414
+ "epoch": 3.033980582524272,
415
+ "grad_norm": 15.25,
416
+ "learning_rate": 0.004696601941747573,
417
+ "loss": 6.9586,
418
+ "step": 27500
419
+ },
420
+ {
421
+ "epoch": 3.089143865842895,
422
+ "grad_norm": 12.1875,
423
+ "learning_rate": 0.004691085613415711,
424
+ "loss": 6.9644,
425
+ "step": 28000
426
+ },
427
+ {
428
+ "epoch": 3.144307149161518,
429
+ "grad_norm": 12.125,
430
+ "learning_rate": 0.0046855692850838486,
431
+ "loss": 6.955,
432
+ "step": 28500
433
+ },
434
+ {
435
+ "epoch": 3.1994704324801413,
436
+ "grad_norm": 11.3125,
437
+ "learning_rate": 0.004680052956751986,
438
+ "loss": 6.9499,
439
+ "step": 29000
440
+ },
441
+ {
442
+ "epoch": 3.2546337157987644,
443
+ "grad_norm": 9.0625,
444
+ "learning_rate": 0.004674536628420123,
445
+ "loss": 6.9522,
446
+ "step": 29500
447
+ },
448
+ {
449
+ "epoch": 3.3097969991173875,
450
+ "grad_norm": 10.25,
451
+ "learning_rate": 0.004669020300088262,
452
+ "loss": 6.952,
453
+ "step": 30000
454
+ },
455
+ {
456
+ "epoch": 3.3649602824360105,
457
+ "grad_norm": 15.6875,
458
+ "learning_rate": 0.0046635039717564,
459
+ "loss": 6.9524,
460
+ "step": 30500
461
+ },
462
+ {
463
+ "epoch": 3.4201235657546336,
464
+ "grad_norm": 18.875,
465
+ "learning_rate": 0.0046579876434245365,
466
+ "loss": 6.9502,
467
+ "step": 31000
468
+ },
469
+ {
470
+ "epoch": 3.4752868490732567,
471
+ "grad_norm": 8.6875,
472
+ "learning_rate": 0.004652471315092674,
473
+ "loss": 6.9465,
474
+ "step": 31500
475
+ },
476
+ {
477
+ "epoch": 3.5304501323918798,
478
+ "grad_norm": 16.375,
479
+ "learning_rate": 0.004646954986760812,
480
+ "loss": 6.9453,
481
+ "step": 32000
482
+ },
483
+ {
484
+ "epoch": 3.585613415710503,
485
+ "grad_norm": 20.5,
486
+ "learning_rate": 0.00464143865842895,
487
+ "loss": 6.9477,
488
+ "step": 32500
489
+ },
490
+ {
491
+ "epoch": 3.6407766990291264,
492
+ "grad_norm": 15.5625,
493
+ "learning_rate": 0.0046359223300970875,
494
+ "loss": 6.9507,
495
+ "step": 33000
496
+ },
497
+ {
498
+ "epoch": 3.6959399823477495,
499
+ "grad_norm": 47.5,
500
+ "learning_rate": 0.004630406001765225,
501
+ "loss": 6.9513,
502
+ "step": 33500
503
+ },
504
+ {
505
+ "epoch": 3.7511032656663725,
506
+ "grad_norm": 17.125,
507
+ "learning_rate": 0.004624889673433363,
508
+ "loss": 6.9489,
509
+ "step": 34000
510
+ },
511
+ {
512
+ "epoch": 3.8062665489849956,
513
+ "grad_norm": 190.0,
514
+ "learning_rate": 0.004619373345101501,
515
+ "loss": 6.9486,
516
+ "step": 34500
517
+ },
518
+ {
519
+ "epoch": 3.8614298323036187,
520
+ "grad_norm": 12.375,
521
+ "learning_rate": 0.0046138570167696385,
522
+ "loss": 6.954,
523
+ "step": 35000
524
+ },
525
+ {
526
+ "epoch": 3.9165931156222418,
527
+ "grad_norm": 12.25,
528
+ "learning_rate": 0.004608340688437776,
529
+ "loss": 6.945,
530
+ "step": 35500
531
+ },
532
+ {
533
+ "epoch": 3.971756398940865,
534
+ "grad_norm": 9.6875,
535
+ "learning_rate": 0.004602824360105914,
536
+ "loss": 6.9449,
537
+ "step": 36000
538
+ },
539
+ {
540
+ "epoch": 4.0,
541
+ "eval_loss": 6.9535441398620605,
542
+ "eval_runtime": 9.4952,
543
+ "eval_samples_per_second": 52.658,
544
+ "eval_steps_per_second": 1.685,
545
+ "step": 36256
546
+ },
547
+ {
548
+ "epoch": 4.026919682259488,
549
+ "grad_norm": 13.375,
550
+ "learning_rate": 0.004597308031774051,
551
+ "loss": 6.9449,
552
+ "step": 36500
553
+ },
554
+ {
555
+ "epoch": 4.0820829655781115,
556
+ "grad_norm": 12.0625,
557
+ "learning_rate": 0.0045917917034421895,
558
+ "loss": 6.9412,
559
+ "step": 37000
560
+ },
561
+ {
562
+ "epoch": 4.1372462488967345,
563
+ "grad_norm": 12.8125,
564
+ "learning_rate": 0.004586275375110326,
565
+ "loss": 6.9405,
566
+ "step": 37500
567
+ },
568
+ {
569
+ "epoch": 4.192409532215358,
570
+ "grad_norm": 10.6875,
571
+ "learning_rate": 0.004580759046778464,
572
+ "loss": 6.9472,
573
+ "step": 38000
574
+ },
575
+ {
576
+ "epoch": 4.247572815533981,
577
+ "grad_norm": 12.625,
578
+ "learning_rate": 0.004575242718446602,
579
+ "loss": 6.9417,
580
+ "step": 38500
581
+ },
582
+ {
583
+ "epoch": 4.302736098852604,
584
+ "grad_norm": 7.96875,
585
+ "learning_rate": 0.00456972639011474,
586
+ "loss": 6.9397,
587
+ "step": 39000
588
+ },
589
+ {
590
+ "epoch": 4.357899382171227,
591
+ "grad_norm": 9.5625,
592
+ "learning_rate": 0.004564210061782877,
593
+ "loss": 6.9399,
594
+ "step": 39500
595
+ },
596
+ {
597
+ "epoch": 4.41306266548985,
598
+ "grad_norm": 8.9375,
599
+ "learning_rate": 0.004558693733451015,
600
+ "loss": 6.9393,
601
+ "step": 40000
602
+ },
603
+ {
604
+ "epoch": 4.468225948808473,
605
+ "grad_norm": 12.875,
606
+ "learning_rate": 0.004553177405119153,
607
+ "loss": 6.9348,
608
+ "step": 40500
609
+ },
610
+ {
611
+ "epoch": 4.523389232127096,
612
+ "grad_norm": 20.125,
613
+ "learning_rate": 0.004547661076787291,
614
+ "loss": 6.94,
615
+ "step": 41000
616
+ },
617
+ {
618
+ "epoch": 4.578552515445719,
619
+ "grad_norm": 12.0,
620
+ "learning_rate": 0.0045421447484554275,
621
+ "loss": 6.9417,
622
+ "step": 41500
623
+ },
624
+ {
625
+ "epoch": 4.633715798764342,
626
+ "grad_norm": 14.6875,
627
+ "learning_rate": 0.004536628420123566,
628
+ "loss": 6.9362,
629
+ "step": 42000
630
+ },
631
+ {
632
+ "epoch": 4.688879082082965,
633
+ "grad_norm": 12.3125,
634
+ "learning_rate": 0.004531112091791704,
635
+ "loss": 6.9365,
636
+ "step": 42500
637
+ },
638
+ {
639
+ "epoch": 4.744042365401588,
640
+ "grad_norm": 9.1875,
641
+ "learning_rate": 0.004525595763459841,
642
+ "loss": 6.9306,
643
+ "step": 43000
644
+ },
645
+ {
646
+ "epoch": 4.7992056487202115,
647
+ "grad_norm": 36.5,
648
+ "learning_rate": 0.0045200794351279786,
649
+ "loss": 6.9657,
650
+ "step": 43500
651
+ },
652
+ {
653
+ "epoch": 4.854368932038835,
654
+ "grad_norm": 33.25,
655
+ "learning_rate": 0.004514563106796117,
656
+ "loss": 6.953,
657
+ "step": 44000
658
+ },
659
+ {
660
+ "epoch": 4.9095322153574585,
661
+ "grad_norm": 20.375,
662
+ "learning_rate": 0.004509046778464254,
663
+ "loss": 6.9506,
664
+ "step": 44500
665
+ },
666
+ {
667
+ "epoch": 4.964695498676081,
668
+ "grad_norm": 7.875,
669
+ "learning_rate": 0.004503530450132392,
670
+ "loss": 6.9484,
671
+ "step": 45000
672
+ },
673
+ {
674
+ "epoch": 5.0,
675
+ "eval_loss": 6.946237564086914,
676
+ "eval_runtime": 8.659,
677
+ "eval_samples_per_second": 57.744,
678
+ "eval_steps_per_second": 1.848,
679
+ "step": 45320
680
+ }
681
+ ],
682
+ "logging_steps": 500,
683
+ "max_steps": 453200,
684
+ "num_input_tokens_seen": 0,
685
+ "num_train_epochs": 50,
686
+ "save_steps": 500,
687
+ "stateful_callbacks": {
688
+ "EarlyStoppingCallback": {
689
+ "args": {
690
+ "early_stopping_patience": 3,
691
+ "early_stopping_threshold": 0.0
692
+ },
693
+ "attributes": {
694
+ "early_stopping_patience_counter": 0
695
+ }
696
+ },
697
+ "TrainerControl": {
698
+ "args": {
699
+ "should_epoch_stop": false,
700
+ "should_evaluate": false,
701
+ "should_log": false,
702
+ "should_save": true,
703
+ "should_training_stop": false
704
+ },
705
+ "attributes": {}
706
+ }
707
+ },
708
+ "total_flos": 2.553070427093514e+18,
709
+ "train_batch_size": 32,
710
+ "trial_name": null,
711
+ "trial_params": null
712
+ }
training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:319c9dc6c26e8b23070399599998d5f4d8c0b3f366e1c9b8e47d1e27155aaa93
3
+ size 5048