Rakhman16 committed
Commit 3f5a8e0
1 Parent(s): abd9b90

Training in progress, step 12500, checkpoint

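This commit overwrites the `last-checkpoint/` folder of an in-progress fine-tuning run at step 12500. A minimal sketch of fetching just that folder at this revision with `huggingface_hub` (the repository id below is a placeholder, not something shown on this page):

```python
# Sketch: download only last-checkpoint/ as it exists at commit 3f5a8e0.
from huggingface_hub import snapshot_download

REPO_ID = "Rakhman16/your-model-repo"  # hypothetical repo id; substitute the real one

local_dir = snapshot_download(
    repo_id=REPO_ID,
    revision="3f5a8e0",                    # the commit hash from this page
    allow_patterns=["last-checkpoint/*"],  # skip everything else in the repo
)
print("checkpoint downloaded to:", local_dir)

# The downloaded folder can then be handed to
# transformers.Trainer.train(resume_from_checkpoint=f"{local_dir}/last-checkpoint")
# to continue the run from step 12500.
```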
last-checkpoint/model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:12d89b6cfeb0fa2a639fb1d022803e910e636a1653929f3379ba8ecc07936d2d
+ oid sha256:84809acf96301d8b75572b9aa608e1f2afc0533f5ee30f46a0431492a1a275f4
  size 891558696
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:16e8ca890c29387dcb94fe4fee166151a4647fb651f909dbd97850a259675cfe
+ oid sha256:984c2e60f40d6b8ecdf43a9a9b2fa6e7fed8a14485e9250dde4344fd78072844
  size 1783272762
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:8eee71f4c759651379c503d3028bec932d355f171dd7453ec6f5c469e966f747
+ oid sha256:96d212c2076c947a58295d88ed7a741f064eeda3b1a76832bac43470f08461ef
  size 14244
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b63e11db1a8e7c7a242100e7b3a9500ec8f1ad290a19c61a227cd5ed6d79dcc2
+ oid sha256:cb61de87e596461b3a2fd8b9e3ff4ac6a0ba9feb11b3f41b64bad2f67194d86a
  size 1064
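The four files above are stored through Git LFS, so each diff only shows the pointer (oid sha256 and size) changing as the checkpoint is overwritten. A minimal sketch for checking that locally downloaded copies match the new pointers, using only the standard library (paths and expected digests are taken from the pointers above):

```python
# Sketch: verify downloaded checkpoint files against the new LFS pointers above.
import hashlib
from pathlib import Path

EXPECTED = {
    "last-checkpoint/model.safetensors": "84809acf96301d8b75572b9aa608e1f2afc0533f5ee30f46a0431492a1a275f4",
    "last-checkpoint/optimizer.pt": "984c2e60f40d6b8ecdf43a9a9b2fa6e7fed8a14485e9250dde4344fd78072844",
    "last-checkpoint/rng_state.pth": "96d212c2076c947a58295d88ed7a741f064eeda3b1a76832bac43470f08461ef",
    "last-checkpoint/scheduler.pt": "cb61de87e596461b3a2fd8b9e3ff4ac6a0ba9feb11b3f41b64bad2f67194d86a",
}

def sha256_of(path: Path, chunk_size: int = 1 << 20) -> str:
    """Stream the file so the ~1.7 GB optimizer state never sits fully in memory."""
    digest = hashlib.sha256()
    with path.open("rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

for rel_path, expected in EXPECTED.items():
    path = Path(rel_path)
    status = "OK" if path.exists() and sha256_of(path) == expected else "MISMATCH/MISSING"
    print(f"{rel_path}: {status}")
```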
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
- "best_metric": 0.10353059321641922,
- "best_model_checkpoint": "./fine-tuned/checkpoint-12000",
- "epoch": 2.107666637393519,
+ "best_metric": 0.10325244069099426,
+ "best_model_checkpoint": "./fine-tuned/checkpoint-12500",
+ "epoch": 2.195486080618249,
  "eval_steps": 100,
- "global_step": 12000,
+ "global_step": 12500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -2647,6 +2647,116 @@
  "eval_samples_per_second": 25.43,
  "eval_steps_per_second": 3.182,
  "step": 12000
+ },
+ {
+ "epoch": 2.116448581715992,
+ "grad_norm": 10527.5751953125,
+ "learning_rate": 1.4125241524679432e-05,
+ "loss": 0.0997,
+ "step": 12050
+ },
+ {
+ "epoch": 2.125230526038465,
+ "grad_norm": 12505.5380859375,
+ "learning_rate": 1.4059371157561918e-05,
+ "loss": 0.0981,
+ "step": 12100
+ },
+ {
+ "epoch": 2.125230526038465,
+ "eval_loss": 0.10333307832479477,
+ "eval_runtime": 175.2906,
+ "eval_samples_per_second": 25.443,
+ "eval_steps_per_second": 3.183,
+ "step": 12100
+ },
+ {
+ "epoch": 2.134012470360938,
+ "grad_norm": 9851.1923828125,
+ "learning_rate": 1.3993500790444406e-05,
+ "loss": 0.1006,
+ "step": 12150
+ },
+ {
+ "epoch": 2.142794414683411,
+ "grad_norm": 9354.9697265625,
+ "learning_rate": 1.3927630423326893e-05,
+ "loss": 0.1032,
+ "step": 12200
+ },
+ {
+ "epoch": 2.142794414683411,
+ "eval_loss": 0.10333764553070068,
+ "eval_runtime": 175.147,
+ "eval_samples_per_second": 25.464,
+ "eval_steps_per_second": 3.186,
+ "step": 12200
+ },
+ {
+ "epoch": 2.151576359005884,
+ "grad_norm": 7880.865234375,
+ "learning_rate": 1.3861760056209381e-05,
+ "loss": 0.0957,
+ "step": 12250
+ },
+ {
+ "epoch": 2.160358303328357,
+ "grad_norm": 17636.8515625,
+ "learning_rate": 1.3795889689091866e-05,
+ "loss": 0.0952,
+ "step": 12300
+ },
+ {
+ "epoch": 2.160358303328357,
+ "eval_loss": 0.10335990786552429,
+ "eval_runtime": 175.3211,
+ "eval_samples_per_second": 25.439,
+ "eval_steps_per_second": 3.183,
+ "step": 12300
+ },
+ {
+ "epoch": 2.1691402476508297,
+ "grad_norm": 15586.3701171875,
+ "learning_rate": 1.3730019321974355e-05,
+ "loss": 0.098,
+ "step": 12350
+ },
+ {
+ "epoch": 2.1779221919733027,
+ "grad_norm": 11448.01953125,
+ "learning_rate": 1.3664148954856841e-05,
+ "loss": 0.0942,
+ "step": 12400
+ },
+ {
+ "epoch": 2.1779221919733027,
+ "eval_loss": 0.10320650041103363,
+ "eval_runtime": 175.2718,
+ "eval_samples_per_second": 25.446,
+ "eval_steps_per_second": 3.184,
+ "step": 12400
+ },
+ {
+ "epoch": 2.1867041362957758,
+ "grad_norm": 13402.5732421875,
+ "learning_rate": 1.359827858773933e-05,
+ "loss": 0.1016,
+ "step": 12450
+ },
+ {
+ "epoch": 2.195486080618249,
+ "grad_norm": 7308.1123046875,
+ "learning_rate": 1.3532408220621816e-05,
+ "loss": 0.0978,
+ "step": 12500
+ },
+ {
+ "epoch": 2.195486080618249,
+ "eval_loss": 0.10325244069099426,
+ "eval_runtime": 175.4131,
+ "eval_samples_per_second": 25.426,
+ "eval_steps_per_second": 3.181,
+ "step": 12500
  }
  ],
  "logging_steps": 50,
@@ -2666,7 +2776,7 @@
  "attributes": {}
  }
  },
- "total_flos": 5.845569478852608e+16,
+ "total_flos": 6.089152635076608e+16,
  "train_batch_size": 8,
  "trial_name": null,
  "trial_params": null