ben81828 committed (verified)
Commit bd4436b · Parent(s): 5e9f3b8

Training in progress, step 950, checkpoint
last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:84586103a211c06663bd404e55c4d764d0a39628f772147e29017239dc0ff834
+ oid sha256:ed912ae9f29651ceebc41713ab5b91988158a52f69ac82baf6102389d31521a7
  size 29034840
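The entries in this commit are Git LFS pointer files: each records only the spec version, a sha256 oid, and the byte size of the actual blob. As a minimal sketch (not part of this repository), assuming the real checkpoint blob has been pulled locally and the pointer text saved alongside it, a download can be verified against its pointer like this; the file paths are hypothetical:

```python
import hashlib
from pathlib import Path

def verify_lfs_pointer(pointer_path: str, blob_path: str) -> bool:
    """Check a downloaded blob against the oid/size recorded in its LFS pointer."""
    fields = {}
    for line in Path(pointer_path).read_text().splitlines():
        key, _, value = line.partition(" ")
        fields[key] = value
    expected_oid = fields["oid"].split(":", 1)[1]  # "sha256:<hex>" -> "<hex>"
    expected_size = int(fields["size"])

    blob = Path(blob_path)
    actual_size = blob.stat().st_size
    actual_oid = hashlib.sha256(blob.read_bytes()).hexdigest()
    return actual_size == expected_size and actual_oid == expected_oid

# Hypothetical local paths; adjust to wherever the checkpoint was fetched.
print(verify_lfs_pointer("adapter_model.safetensors.pointer",
                         "last-checkpoint/adapter_model.safetensors"))
```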
last-checkpoint/global_step950/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1b809ff8945c89100055098c0e58e465735de8a9f55ea94fa5fcdd14223305ef
+ size 43429616
last-checkpoint/global_step950/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d260f3afadca21e3a4fdba818360a0d7ec292faf80eef232e413b07af69ffc10
+ size 43429616
last-checkpoint/global_step950/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e642dbc5359877c528369fb0e55fd945f101471de9fa1da28289e1ea1879197e
+ size 43429616
last-checkpoint/global_step950/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93d3075ceb4a740b9de02bd73f8b6e74568b2448b5e5d20bdfc72aefd6a43ab0
+ size 43429616
last-checkpoint/global_step950/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:de8d2452415f67f6cb314134176f636d47ded9477de6e168c2925a95f1e50399
+ size 637299
last-checkpoint/global_step950/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:59a0c15d11bab9ab36ba888fa09e8a77ddc15c19181e193c722b9f734d07accd
+ size 637171
last-checkpoint/global_step950/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:8010ba1747feb31117a6b69f86638d1fc0741ad088c9024682efeba498962221
+ size 637171
last-checkpoint/global_step950/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:39c9dd240d6e20c6b0f5d0f32f43209c894bb5b7adbab13d992cf542fcdcdb1c
+ size 637171
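The global_step950 directory added above holds the DeepSpeed ZeRO shards for this step: one bf16_zero_pp_rank_*_optim_states.pt and one zero_pp_rank_*_model_states.pt per data-parallel rank (four ranks here). As a hedged sketch, assuming a standard DeepSpeed installation and that last-checkpoint/ has been downloaded locally with the shard directory intact, the per-rank shards could be consolidated into a single fp32 state dict roughly like this:

```python
# Sketch under stated assumptions: DeepSpeed installed, checkpoint pulled to ./last-checkpoint.
from deepspeed.utils.zero_to_fp32 import get_fp32_state_dict_from_zero_checkpoint

# Merges the per-rank ZeRO shards for the given tag into one CPU state dict
# keyed by parameter name (the tag matches the directory name in this commit).
state_dict = get_fp32_state_dict_from_zero_checkpoint("last-checkpoint", tag="global_step950")
print(f"reconstructed {len(state_dict)} tensors")
```

If the tag argument is omitted, DeepSpeed reads it from the last-checkpoint/latest file, which this commit also bumps to global_step950.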
last-checkpoint/latest CHANGED
@@ -1 +1 @@
- global_step900
+ global_step950
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:b8f22ced19e790cc864cefe3b7c711d9ae631c44f95d42fb4829688cc3de0153
+ oid sha256:7f2439da621f14c22b4f733e91bfc9de6b506d28d7b8d6f3eaca2e0b4f24c078
  size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:3e0407513eba77d34cbf3adf0e59a58bd80716f4f00f414854253637e82be43d
+ oid sha256:c9e3fb386557f376b8946af5b8c91f9418f374dddb2ad9da4868b1ef16778c32
  size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:6060636c023258ce9b965e244b8a58b4c99d5784dde4405b39737550ef50cd4f
+ oid sha256:dc7774d06045635bece9e960378fdc6913bf7bbbc903444cc570d1ca6ac25645
  size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:c24ccdfdcde39cb2265c82c50c36ffdfcc670f757aba4bcf4bb0fdc6d1373c4c
+ oid sha256:d98c54a80a914fecf43d06ea81432499f46e70664f1d04651bf339163e30fa9e
  size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
  version https://git-lfs.github.com/spec/v1
- oid sha256:feb4015894f59edc29c71bc4938b5d4ab98daad34a38a3d387b308b3b1d4b280
+ oid sha256:b9f224baf5bd2044314606c1d88f84cce32f1b37c43c15835b14e72f6a72a4fc
  size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
  {
  "best_metric": 0.0033526704646646976,
  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_scale4/lora/sft/checkpoint-550",
- "epoch": 0.4635591037857327,
+ "epoch": 0.4893123873293845,
  "eval_steps": 50,
- "global_step": 900,
+ "global_step": 950,
  "is_hyper_param_search": false,
  "is_local_process_zero": true,
  "is_world_process_zero": true,
@@ -1609,11 +1609,100 @@
  "eval_steps_per_second": 0.778,
  "num_input_tokens_seen": 8985600,
  "step": 900
+ },
+ {
+ "epoch": 0.46613443214009787,
+ "grad_norm": 2.090931980208863,
+ "learning_rate": 8.775851227988656e-05,
+ "loss": 0.0318,
+ "num_input_tokens_seen": 9035520,
+ "step": 905
+ },
+ {
+ "epoch": 0.46870976049446306,
+ "grad_norm": 1.7373295104389102,
+ "learning_rate": 8.759866980070963e-05,
+ "loss": 0.0635,
+ "num_input_tokens_seen": 9085440,
+ "step": 910
+ },
+ {
+ "epoch": 0.47128508884882825,
+ "grad_norm": 0.1557668148789241,
+ "learning_rate": 8.743793810744654e-05,
+ "loss": 0.0035,
+ "num_input_tokens_seen": 9135360,
+ "step": 915
+ },
+ {
+ "epoch": 0.4738604172031934,
+ "grad_norm": 0.5241949871459053,
+ "learning_rate": 8.727632100142551e-05,
+ "loss": 0.0047,
+ "num_input_tokens_seen": 9185280,
+ "step": 920
+ },
+ {
+ "epoch": 0.4764357455575586,
+ "grad_norm": 2.329131853129593,
+ "learning_rate": 8.711382230491493e-05,
+ "loss": 0.0194,
+ "num_input_tokens_seen": 9235200,
+ "step": 925
+ },
+ {
+ "epoch": 0.47901107391192377,
+ "grad_norm": 1.0158880737206768,
+ "learning_rate": 8.695044586103296e-05,
+ "loss": 0.0234,
+ "num_input_tokens_seen": 9285120,
+ "step": 930
+ },
+ {
+ "epoch": 0.48158640226628896,
+ "grad_norm": 0.016729230547496875,
+ "learning_rate": 8.678619553365659e-05,
+ "loss": 0.0253,
+ "num_input_tokens_seen": 9335040,
+ "step": 935
+ },
+ {
+ "epoch": 0.48416173062065415,
+ "grad_norm": 2.937987778998638,
+ "learning_rate": 8.662107520733027e-05,
+ "loss": 0.0191,
+ "num_input_tokens_seen": 9384960,
+ "step": 940
+ },
+ {
+ "epoch": 0.4867370589750193,
+ "grad_norm": 0.20640046184009653,
+ "learning_rate": 8.64550887871741e-05,
+ "loss": 0.0469,
+ "num_input_tokens_seen": 9434880,
+ "step": 945
+ },
+ {
+ "epoch": 0.4893123873293845,
+ "grad_norm": 1.9952606902760353,
+ "learning_rate": 8.628824019879137e-05,
+ "loss": 0.0415,
+ "num_input_tokens_seen": 9484800,
+ "step": 950
+ },
+ {
+ "epoch": 0.4893123873293845,
+ "eval_loss": 0.007216573692858219,
+ "eval_runtime": 19.2716,
+ "eval_samples_per_second": 3.113,
+ "eval_steps_per_second": 0.778,
+ "num_input_tokens_seen": 9484800,
+ "step": 950
  }
  ],
  "logging_steps": 5,
  "max_steps": 3400,
- "num_input_tokens_seen": 8985600,
+ "num_input_tokens_seen": 9484800,
  "num_train_epochs": 2,
  "save_steps": 50,
  "stateful_callbacks": {
@@ -1628,7 +1717,7 @@
  "attributes": {}
  }
  },
- "total_flos": 592864460668928.0,
+ "total_flos": 625804440240128.0,
  "train_batch_size": 1,
  "trial_name": null,
  "trial_params": null