dada22231 committed (verified)
Commit 56d10f5 · Parent(s): d5415d3

Training in progress, step 75, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:93687b280ee3b57565942dd5a1b28421a8d93bcd46ef3887a93f8fd0bdefedba
+oid sha256:f583834bbce9dff3c4190e4b544b32cc3e6b8aad0c2cc4db78f90c32b494c6d5
 size 1521616
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e91d4f88bd79beb1c72af0cdae8374284090306de813be81a6d7814a2419edad
+oid sha256:5f703d7b3e3b461cee90be78974a3149b62f1634412ab8d20eb71e6af8041e7b
 size 3108666
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8066a8fce9d55dac9daad2ad7a95eee3ca41a28c3ca97002ba6d884580d00ab6
+oid sha256:348423016cd468e87bf1055688f8279d584df3402114224ced597d402741a6c2
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:7728bbd8c2b314bc5cc878c06847c81f1886c79d3f48803f64a02a522dcee135
+oid sha256:9e423371f7241bbc318ffebfbb1762f075f08a1dfc821d8377483824ee44ae67
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8d98e76b5d6907d288fe29a1b37db6d8c6501718be49c5826034cc22d2baf037
+oid sha256:225aef233c53e245d3147cba77ccbcd1c9ff6db4d8ad774acf8f7604b5685443
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f991b0b770a9b4b790ae4822e231cee53fd7e3e7642bce4020b2ffa8635a6cd5
+oid sha256:9c55d4b9e6b75c072d7f3861825bd82916294198d7211847ee3bf7eacf37cec4
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:df19ed1a9610a5422497073697cbf4575f80de47fbb46ef0cdd2779386b031fa
+oid sha256:a5b53655d80c3ade692dacae57cafa4aff84c325b5cb8d0fba89d01b50d41566
 size 1064
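
Each of the diffs above touches a Git LFS pointer file: a spec version line, an `oid sha256:<hash>` identifying the stored blob, and its `size` in bytes. As a minimal sketch (the local path is an assumption, not part of this commit), a downloaded artifact can be checked against the `oid` recorded in its pointer:

```python
import hashlib

def sha256_of(path: str) -> str:
    """Stream a file through SHA-256 and return the hex digest."""
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            h.update(chunk)
    return h.hexdigest()

# Hypothetical local copy of the checkpoint file; the expected oid is the
# new hash recorded in the last-checkpoint/scheduler.pt pointer above.
expected = "a5b53655d80c3ade692dacae57cafa4aff84c325b5cb8d0fba89d01b50d41566"
actual = sha256_of("last-checkpoint/scheduler.pt")
print("match" if actual == expected else "mismatch")
```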
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 8.629364013671875,
-  "best_model_checkpoint": "miner_id_24/checkpoint-50",
-  "epoch": 0.03555634569657103,
+  "best_metric": 8.485151290893555,
+  "best_model_checkpoint": "miner_id_24/checkpoint-75",
+  "epoch": 0.05333451854485655,
   "eval_steps": 25,
-  "global_step": 50,
+  "global_step": 75,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -381,6 +381,189 @@
       "eval_samples_per_second": 105.064,
       "eval_steps_per_second": 27.317,
       "step": 50
+    },
+    {
+      "epoch": 0.036267472610502456,
+      "grad_norm": 1.2948602437973022,
+      "learning_rate": 5.192909139858981e-05,
+      "loss": 8.3854,
+      "step": 51
+    },
+    {
+      "epoch": 0.03697859952443388,
+      "grad_norm": 1.202957272529602,
+      "learning_rate": 5.0398113297608465e-05,
+      "loss": 8.4592,
+      "step": 52
+    },
+    {
+      "epoch": 0.037689726438365295,
+      "grad_norm": 1.0991284847259521,
+      "learning_rate": 4.887250079066892e-05,
+      "loss": 8.3654,
+      "step": 53
+    },
+    {
+      "epoch": 0.03840085335229672,
+      "grad_norm": 1.049133539199829,
+      "learning_rate": 4.7354032673710005e-05,
+      "loss": 8.5559,
+      "step": 54
+    },
+    {
+      "epoch": 0.03911198026622814,
+      "grad_norm": 1.0683585405349731,
+      "learning_rate": 4.584447941263149e-05,
+      "loss": 8.8506,
+      "step": 55
+    },
+    {
+      "epoch": 0.03982310718015956,
+      "grad_norm": 1.2173759937286377,
+      "learning_rate": 4.43456010790099e-05,
+      "loss": 8.7193,
+      "step": 56
+    },
+    {
+      "epoch": 0.04053423409409098,
+      "grad_norm": 1.0298593044281006,
+      "learning_rate": 4.285914529793391e-05,
+      "loss": 8.8945,
+      "step": 57
+    },
+    {
+      "epoch": 0.0412453610080224,
+      "grad_norm": 1.0951929092407227,
+      "learning_rate": 4.13868452103516e-05,
+      "loss": 9.0274,
+      "step": 58
+    },
+    {
+      "epoch": 0.04195648792195382,
+      "grad_norm": 0.9960741996765137,
+      "learning_rate": 3.9930417452305626e-05,
+      "loss": 8.904,
+      "step": 59
+    },
+    {
+      "epoch": 0.04266761483588524,
+      "grad_norm": 1.194457769393921,
+      "learning_rate": 3.8491560153412466e-05,
+      "loss": 8.9637,
+      "step": 60
+    },
+    {
+      "epoch": 0.043378741749816666,
+      "grad_norm": 1.4879592657089233,
+      "learning_rate": 3.707195095691913e-05,
+      "loss": 9.1381,
+      "step": 61
+    },
+    {
+      "epoch": 0.04408986866374808,
+      "grad_norm": 1.7354305982589722,
+      "learning_rate": 3.567324506364632e-05,
+      "loss": 8.9569,
+      "step": 62
+    },
+    {
+      "epoch": 0.044800995577679505,
+      "grad_norm": 1.2308183908462524,
+      "learning_rate": 3.4297073302098156e-05,
+      "loss": 8.0746,
+      "step": 63
+    },
+    {
+      "epoch": 0.04551212249161093,
+      "grad_norm": 1.1788955926895142,
+      "learning_rate": 3.2945040226989244e-05,
+      "loss": 8.4578,
+      "step": 64
+    },
+    {
+      "epoch": 0.046223249405542344,
+      "grad_norm": 1.1902447938919067,
+      "learning_rate": 3.16187222484055e-05,
+      "loss": 8.1835,
+      "step": 65
+    },
+    {
+      "epoch": 0.04693437631947377,
+      "grad_norm": 1.241468906402588,
+      "learning_rate": 3.0319665793780648e-05,
+      "loss": 8.0961,
+      "step": 66
+    },
+    {
+      "epoch": 0.04764550323340519,
+      "grad_norm": 1.0362024307250977,
+      "learning_rate": 2.9049385504830985e-05,
+      "loss": 8.6565,
+      "step": 67
+    },
+    {
+      "epoch": 0.048356630147336606,
+      "grad_norm": 0.9590556621551514,
+      "learning_rate": 2.7809362471550748e-05,
+      "loss": 8.8393,
+      "step": 68
+    },
+    {
+      "epoch": 0.04906775706126803,
+      "grad_norm": 0.9237630367279053,
+      "learning_rate": 2.660104250532764e-05,
+      "loss": 8.8098,
+      "step": 69
+    },
+    {
+      "epoch": 0.04977888397519945,
+      "grad_norm": 0.9075753092765808,
+      "learning_rate": 2.5425834453191232e-05,
+      "loss": 8.8575,
+      "step": 70
+    },
+    {
+      "epoch": 0.05049001088913087,
+      "grad_norm": 1.0483438968658447,
+      "learning_rate": 2.4285108555160577e-05,
+      "loss": 8.9505,
+      "step": 71
+    },
+    {
+      "epoch": 0.05120113780306229,
+      "grad_norm": 1.1041228771209717,
+      "learning_rate": 2.3180194846605367e-05,
+      "loss": 8.9506,
+      "step": 72
+    },
+    {
+      "epoch": 0.051912264716993714,
+      "grad_norm": 1.2572849988937378,
+      "learning_rate": 2.2112381607484417e-05,
+      "loss": 8.8908,
+      "step": 73
+    },
+    {
+      "epoch": 0.05262339163092513,
+      "grad_norm": 1.5153453350067139,
+      "learning_rate": 2.1082913860268765e-05,
+      "loss": 9.1148,
+      "step": 74
+    },
+    {
+      "epoch": 0.05333451854485655,
+      "grad_norm": 2.149052619934082,
+      "learning_rate": 2.0092991918301108e-05,
+      "loss": 9.13,
+      "step": 75
+    },
+    {
+      "epoch": 0.05333451854485655,
+      "eval_loss": 8.485151290893555,
+      "eval_runtime": 0.4834,
+      "eval_samples_per_second": 103.437,
+      "eval_steps_per_second": 26.894,
+      "step": 75
     }
   ],
   "logging_steps": 1,
@@ -409,7 +592,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 232013797785600.0,
+  "total_flos": 348020696678400.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null