ben81828 committed
Commit a10a3e1 · verified · 1 Parent(s): 72c8b3c

Training in progress, step 600, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:8ea0b12291caef0384f7fd3bc0b1e4fc7815f7b867e0e565a267ef13238fd6a9
+oid sha256:69777f5aa25eff25d556b3b21e25927428c8db95972e0d7a65589f133ec91630
 size 29034840
last-checkpoint/global_step600/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:ab80c15906861e1236c44dc13b999d5885040dfb18b620b583a1a8ce6b76cf77
+size 43429616
last-checkpoint/global_step600/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:16c148b81bbff198e621c2384ce6fb1153bf8f85f2600cf26d42d80859a76e13
+size 43429616
last-checkpoint/global_step600/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a07f6b032817ea26e5a51a4cd0ad95284e861fc5c633421968ba2ba8628465d8
+size 43429616
last-checkpoint/global_step600/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8d6c344741ddb3bf2afcd556430bff12e3a8d7b5fd0cda080af2d224555f0af1
+size 43429616
last-checkpoint/global_step600/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11e8027e9407df0ac39e2b3b0f9b391ceffcc0365bf7b524d551f5dbe3c76e79
+size 637299
last-checkpoint/global_step600/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c20dbc40aa3a9738159e35e517f2b9b468c6b1bee8cb810efadc972e0821f0fc
+size 637171
last-checkpoint/global_step600/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:585439023d752dde2525d7bdccd458f3fb79989825110ba63e0cce828093c299
+size 637171
last-checkpoint/global_step600/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e197c45213ad104a71f307e0f73afc54979e8843c65bd3e2a9fc618c43b98f06
+size 637171
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step550
+global_step600
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ae78313eb528c8d3695eebaf4de3539bd0a0bc6ee18c66af1ee183442f1758a0
+oid sha256:a81e3916b1392c4c49afb171dee5415c15f5a5a5af8749b28195fcfa0596699c
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1b38031f60d9e88601d369ef46bcdcf2b5b03f2cb4ba93853bcb2328df7ebb7c
+oid sha256:9a781038dd714b87b8adb1aac8dbc8217ceb607428a992133954ad522365236e
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f58092375c93d237cd0e3149aecfbf83e2acdae46279e07a32920d01cb507e64
+oid sha256:9446c3db15f382a5546f13622787fc99392a5e0bc8a9ca2da1838de7ab621a37
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:83cd4bbff9962da7ec6787fcea8d65df7096917f9a5902e249ba7aee8887fe5f
+oid sha256:1f11e7a6b3faa884fc23044e3772ff9dd72c257f02e121665061e2a03d518bd9
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:48642e777392e25274bb934c3caefd33d14bddceae2e006daf244ac2f6537412
+oid sha256:f84ae20af7f03e6aebf5ce5f2f22e99f298d925ddedf6c1ee5005301cfd997b7
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
-  "best_metric": 0.6505001187324524,
-  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-500",
-  "epoch": 0.16248153618906944,
+  "best_metric": 0.5883122682571411,
+  "best_model_checkpoint": "saves/CADICA_qwenvl_direction_then_DetectAndClassify_scale6/lora/sft/checkpoint-600",
+  "epoch": 0.17725258493353027,
   "eval_steps": 50,
-  "global_step": 550,
+  "global_step": 600,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -986,11 +986,100 @@
       "eval_steps_per_second": 0.782,
       "num_input_tokens_seen": 5703016,
       "step": 550
+    },
+    {
+      "epoch": 0.16395864106351551,
+      "grad_norm": 10.880382658420737,
+      "learning_rate": 9.972190879892147e-05,
+      "loss": 0.6076,
+      "num_input_tokens_seen": 5754192,
+      "step": 555
+    },
+    {
+      "epoch": 0.1654357459379616,
+      "grad_norm": 5.9115707757479345,
+      "learning_rate": 9.970889784653033e-05,
+      "loss": 0.6136,
+      "num_input_tokens_seen": 5806272,
+      "step": 560
+    },
+    {
+      "epoch": 0.16691285081240767,
+      "grad_norm": 8.300559629359741,
+      "learning_rate": 9.969559033135318e-05,
+      "loss": 0.5554,
+      "num_input_tokens_seen": 5858632,
+      "step": 565
+    },
+    {
+      "epoch": 0.16838995568685378,
+      "grad_norm": 19.24269810236072,
+      "learning_rate": 9.96819863327825e-05,
+      "loss": 0.5847,
+      "num_input_tokens_seen": 5909936,
+      "step": 570
+    },
+    {
+      "epoch": 0.16986706056129985,
+      "grad_norm": 2.997295434716295,
+      "learning_rate": 9.966808593197959e-05,
+      "loss": 0.6217,
+      "num_input_tokens_seen": 5961464,
+      "step": 575
+    },
+    {
+      "epoch": 0.17134416543574593,
+      "grad_norm": 8.454212007467431,
+      "learning_rate": 9.965388921187413e-05,
+      "loss": 0.5569,
+      "num_input_tokens_seen": 6013696,
+      "step": 580
+    },
+    {
+      "epoch": 0.172821270310192,
+      "grad_norm": 11.728020547911296,
+      "learning_rate": 9.963939625716361e-05,
+      "loss": 0.5894,
+      "num_input_tokens_seen": 6065736,
+      "step": 585
+    },
+    {
+      "epoch": 0.17429837518463812,
+      "grad_norm": 20.470288976160585,
+      "learning_rate": 9.962460715431284e-05,
+      "loss": 0.5783,
+      "num_input_tokens_seen": 6118400,
+      "step": 590
+    },
+    {
+      "epoch": 0.1757754800590842,
+      "grad_norm": 4.675971808784723,
+      "learning_rate": 9.960952199155347e-05,
+      "loss": 0.5657,
+      "num_input_tokens_seen": 6171120,
+      "step": 595
+    },
+    {
+      "epoch": 0.17725258493353027,
+      "grad_norm": 9.775804001092958,
+      "learning_rate": 9.959414085888342e-05,
+      "loss": 0.6331,
+      "num_input_tokens_seen": 6222736,
+      "step": 600
+    },
+    {
+      "epoch": 0.17725258493353027,
+      "eval_loss": 0.5883122682571411,
+      "eval_runtime": 19.002,
+      "eval_samples_per_second": 3.158,
+      "eval_steps_per_second": 0.789,
+      "num_input_tokens_seen": 6222736,
+      "step": 600
     }
   ],
   "logging_steps": 5,
   "max_steps": 6770,
-  "num_input_tokens_seen": 5703016,
+  "num_input_tokens_seen": 6222736,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -1005,7 +1094,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 376097589690368.0,
+  "total_flos": 410420689764352.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null