RodrigoSalazar-U committed on
Commit 96f7e48 · verified · 1 Parent(s): d1ec7d9

Training in progress, step 7500, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3afee2b89c55d138018b573e9a914e6a07453c7de2e6deccf508c92105811902
+ oid sha256:b57bafe229ddc9d396d70b73c5d3f1551beb944434118874bbd2db99ccfa0758
  size 4785762744
last-checkpoint/optimizer.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:15bb57db9f3fbc5a6c418e8722464684cda94ed4f1003b4b9db8675541297f5d
+ oid sha256:d4815fbb30c93a76d04b86b348cb01ba704d445bd57e754c90d9516cc171859c
  size 3497859804
last-checkpoint/rng_state.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:acc2d65074a2fcc8399dba6d3c0a62d0568496e8b5831e17319a7dcd95d56dc4
+ oid sha256:56ada7a9076e894e38e63b2e547d8bfe564d09412403a53a9fd4f37e79358b8d
  size 14308
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:fc6c5d761a120c0c95ac7582051fc67375b77e8e7cc8898f714b41f2b1ba2779
+ oid sha256:cd9a25d06c35d5d5739e33f68c959f00eb5b9e3a183683d69b110de9c2694ca6
  size 1064
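
The four files above are Git LFS pointers: each tracks only the blob's sha256 oid and byte size rather than the binary contents. A minimal Python sketch (not part of this commit; file paths are illustrative) of how such a pointer can be parsed and a downloaded blob checked against it:

import hashlib

def parse_lfs_pointer(pointer_path):
    # Read a Git LFS pointer file (one "key value" pair per line) into a dict,
    # e.g. {"version": "...", "oid": "sha256:...", "size": "4785762744"}.
    fields = {}
    with open(pointer_path, "r") as f:
        for line in f:
            key, _, value = line.strip().partition(" ")
            if key:
                fields[key] = value
    return fields

def verify_blob(blob_path, pointer):
    # Check a downloaded blob against the pointer's sha256 oid and byte size.
    expected_oid = pointer["oid"].removeprefix("sha256:")
    expected_size = int(pointer["size"])
    digest = hashlib.sha256()
    total = 0
    with open(blob_path, "rb") as f:
        for chunk in iter(lambda: f.read(1 << 20), b""):
            digest.update(chunk)
            total += len(chunk)
    return digest.hexdigest() == expected_oid and total == expected_size

# Hypothetical usage: the pointer text is what this diff shows for
# last-checkpoint/adapter_model.safetensors; the blob is the resolved download.
# print(verify_blob("adapter_model.safetensors", parse_lfs_pointer("adapter_model.pointer")))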
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": null,
  "best_model_checkpoint": null,
- "epoch": 3.833515881708653,
+ "epoch": 4.1073384446878425,
  "eval_steps": 500,
- "global_step": 7000,
+ "global_step": 7500,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -497,6 +497,41 @@
  "learning_rate": 1.5683283458260718e-05,
  "loss": 0.2968,
  "step": 7000
+ },
+ {
+ "epoch": 3.888280394304491,
+ "grad_norm": 1.968000054359436,
+ "learning_rate": 1.4318389436742962e-05,
+ "loss": 0.2907,
+ "step": 7100
+ },
+ {
+ "epoch": 3.9430449069003286,
+ "grad_norm": 1.5500705242156982,
+ "learning_rate": 1.3005646664605165e-05,
+ "loss": 0.2922,
+ "step": 7200
+ },
+ {
+ "epoch": 3.9978094194961664,
+ "grad_norm": 1.7522929906845093,
+ "learning_rate": 1.1746973810164147e-05,
+ "loss": 0.2815,
+ "step": 7300
+ },
+ {
+ "epoch": 4.052573932092004,
+ "grad_norm": 0.8427908420562744,
+ "learning_rate": 1.0544210514649233e-05,
+ "loss": 0.1758,
+ "step": 7400
+ },
+ {
+ "epoch": 4.1073384446878425,
+ "grad_norm": 0.8239675760269165,
+ "learning_rate": 9.399114703433688e-06,
+ "loss": 0.17,
+ "step": 7500
  }
  ],
  "logging_steps": 100,
@@ -516,7 +551,7 @@
  "attributes": {}
  }
  },
- "total_flos": 3.3642526441134e+18,
+ "total_flos": 3.605124734318199e+18,
  "train_batch_size": 16,
  "trial_name": null,
  "trial_params": null