Ehsanl committed
Commit e88bd96 (verified)
1 Parent(s): 4fc864b

Checkpoint 765

Files changed (3):
  1. config.json +1 -1
  2. model.safetensors +2 -2
  3. trainer_state.json +177 -2
config.json CHANGED
@@ -6,7 +6,7 @@
  "attention_probs_dropout_prob": 0.1,
  "classifier_dropout": null,
  "cls_token_id": 1,
- "dtype": "bfloat16",
+ "dtype": "float32",
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.1,
  "hidden_size": 1024,
model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:ef45eddf8de63b8d94c4ad643951c33ba47582e7a21116db3c54bbb912987848
- size 690656
+ oid sha256:f01bdaa4b46823cc9e998d4aceda02d6239ec88146554a6e308ef39a1d46e7c8
+ size 1338773320
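
The model.safetensors entry is a Git LFS pointer, so only the object id and byte size change in the diff. The new size, 1,338,773,320 bytes, is roughly 4 bytes × 335M parameters, which is consistent with a hidden_size-1024, BERT-large-sized encoder stored in float32. A small stdlib-only sketch (the local file path is an assumption) for verifying that a downloaded file matches this pointer:

```python
import hashlib
import os

# Expected values copied from the LFS pointer in this commit.
EXPECTED_SHA256 = "f01bdaa4b46823cc9e998d4aceda02d6239ec88146554a6e308ef39a1d46e7c8"
EXPECTED_SIZE = 1338773320

path = "checkpoint-765/model.safetensors"  # hypothetical local path

# Compare the on-disk size first (cheap), then the SHA-256 digest.
assert os.path.getsize(path) == EXPECTED_SIZE, "size mismatch"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        h.update(chunk)
assert h.hexdigest() == EXPECTED_SHA256, "hash mismatch"
print("model.safetensors matches the LFS pointer")
```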
trainer_state.json CHANGED
@@ -2,9 +2,9 @@
  "best_global_step": null,
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 0.5004906771344455,
+ "epoch": 0.7507360157016683,
  "eval_steps": 500,
- "global_step": 510,
+ "global_step": 765,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -365,6 +365,181 @@
  "learning_rate": 1e-05,
  "loss": 2.3051,
  "step": 510
+ },
+ {
+ "epoch": 0.5103042198233563,
+ "grad_norm": 4.162623882293701,
+ "learning_rate": 1e-05,
+ "loss": 2.5193,
+ "step": 520
+ },
+ {
+ "epoch": 0.5201177625122669,
+ "grad_norm": 3.865851879119873,
+ "learning_rate": 1e-05,
+ "loss": 2.1113,
+ "step": 530
+ },
+ {
+ "epoch": 0.5299313052011776,
+ "grad_norm": 3.6652672290802,
+ "learning_rate": 1e-05,
+ "loss": 2.605,
+ "step": 540
+ },
+ {
+ "epoch": 0.5397448478900884,
+ "grad_norm": 1123418.0,
+ "learning_rate": 1e-05,
+ "loss": 2.367,
+ "step": 550
+ },
+ {
+ "epoch": 0.549558390578999,
+ "grad_norm": 3.206057071685791,
+ "learning_rate": 1e-05,
+ "loss": 0.9706,
+ "step": 560
+ },
+ {
+ "epoch": 0.5593719332679097,
+ "grad_norm": 3.8300833702087402,
+ "learning_rate": 1e-05,
+ "loss": 1.6688,
+ "step": 570
+ },
+ {
+ "epoch": 0.5691854759568205,
+ "grad_norm": 3.4160726070404053,
+ "learning_rate": 1e-05,
+ "loss": 1.8959,
+ "step": 580
+ },
+ {
+ "epoch": 0.5789990186457311,
+ "grad_norm": 6.991641044616699,
+ "learning_rate": 1e-05,
+ "loss": 2.8449,
+ "step": 590
+ },
+ {
+ "epoch": 0.5888125613346418,
+ "grad_norm": 3.89111065864563,
+ "learning_rate": 1e-05,
+ "loss": 2.8364,
+ "step": 600
+ },
+ {
+ "epoch": 0.5986261040235525,
+ "grad_norm": 12.52274227142334,
+ "learning_rate": 1e-05,
+ "loss": 2.3841,
+ "step": 610
+ },
+ {
+ "epoch": 0.6084396467124632,
+ "grad_norm": 1124655.25,
+ "learning_rate": 1e-05,
+ "loss": 2.8931,
+ "step": 620
+ },
+ {
+ "epoch": 0.6182531894013739,
+ "grad_norm": 2132181.75,
+ "learning_rate": 1e-05,
+ "loss": 1.8265,
+ "step": 630
+ },
+ {
+ "epoch": 0.6280667320902846,
+ "grad_norm": 3.21681547164917,
+ "learning_rate": 1e-05,
+ "loss": 0.8137,
+ "step": 640
+ },
+ {
+ "epoch": 0.6378802747791953,
+ "grad_norm": 1385230.375,
+ "learning_rate": 1e-05,
+ "loss": 1.2742,
+ "step": 650
+ },
+ {
+ "epoch": 0.647693817468106,
+ "grad_norm": 10.80539321899414,
+ "learning_rate": 1e-05,
+ "loss": 3.0502,
+ "step": 660
+ },
+ {
+ "epoch": 0.6575073601570167,
+ "grad_norm": 1592570.0,
+ "learning_rate": 1e-05,
+ "loss": 1.9121,
+ "step": 670
+ },
+ {
+ "epoch": 0.6673209028459274,
+ "grad_norm": 985591.5625,
+ "learning_rate": 1e-05,
+ "loss": 1.8159,
+ "step": 680
+ },
+ {
+ "epoch": 0.677134445534838,
+ "grad_norm": 1119573.375,
+ "learning_rate": 1e-05,
+ "loss": 1.9695,
+ "step": 690
+ },
+ {
+ "epoch": 0.6869479882237488,
+ "grad_norm": 3.928929090499878,
+ "learning_rate": 1e-05,
+ "loss": 2.1545,
+ "step": 700
+ },
+ {
+ "epoch": 0.6967615309126595,
+ "grad_norm": 998297.4375,
+ "learning_rate": 1e-05,
+ "loss": 1.2963,
+ "step": 710
+ },
+ {
+ "epoch": 0.7065750736015701,
+ "grad_norm": 3.8201591968536377,
+ "learning_rate": 1e-05,
+ "loss": 0.9735,
+ "step": 720
+ },
+ {
+ "epoch": 0.7163886162904809,
+ "grad_norm": 3.7799386978149414,
+ "learning_rate": 1e-05,
+ "loss": 1.5274,
+ "step": 730
+ },
+ {
+ "epoch": 0.7262021589793916,
+ "grad_norm": 3.718870162963867,
+ "learning_rate": 1e-05,
+ "loss": 2.9676,
+ "step": 740
+ },
+ {
+ "epoch": 0.7360157016683022,
+ "grad_norm": 4.023947715759277,
+ "learning_rate": 1e-05,
+ "loss": 1.3345,
+ "step": 750
+ },
+ {
+ "epoch": 0.745829244357213,
+ "grad_norm": 14.283628463745117,
+ "learning_rate": 1e-05,
+ "loss": 2.7141,
+ "step": 760
  }
  ],
  "logging_steps": 10,