ben81828 committed
Commit a644ec5 · verified · 1 Parent(s): 082e7a9

Training in progress, step 1650, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f11513d52bcbabbebdae5c22382754c386d07651a25539e1cdcc0f4b4982a0f7
+oid sha256:919717a8f83cc7597c4b3c1c6f8d6f3d9da7bb13e3737cb159de4e4e604b9ef9
 size 18516456
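
The CHANGED entries above and below are Git LFS pointer files: each push rewrites only the sha256 oid while the recorded byte size stays the same. A minimal verification sketch, assuming the adapter file has already been pulled from LFS to the path shown (the path and the expected digest are simply copied from this diff):

import hashlib

def sha256_of(path, chunk_size=1 << 20):
    # Stream the file so large checkpoints never need to fit in memory.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

# Path and oid taken from the new pointer above; adjust the path if the
# repository was cloned somewhere else.
digest = sha256_of("last-checkpoint/adapter_model.safetensors")
expected = "919717a8f83cc7597c4b3c1c6f8d6f3d9da7bb13e3737cb159de4e4e604b9ef9"
print("match" if digest == expected else "mismatch", digest)
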
last-checkpoint/global_step1650/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dc3d2bcd74c2289546af424c3ccef41a3590dee4d98692dc7035cb770ec6440c
+size 27700976
last-checkpoint/global_step1650/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bc6c1583f6d8a8f5ea608d477928945261e94155b200c9d5296d88728ae362e7
+size 27700976
last-checkpoint/global_step1650/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:32b4ffd70ff27d3f3556fb95a47f832ac49820396d6cf7a0e5be6d97cd0bd157
+size 27700976
last-checkpoint/global_step1650/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cc9303ec814fb09b35b1d5a95b7b6bf4117a5288d03c0b091aae039b16f327e9
+size 27700976
last-checkpoint/global_step1650/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:86a71feee743879831ed6c178773eb14d5a9a8462d065c3a323d963c0abaf6d3
+size 411571
last-checkpoint/global_step1650/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:420d090db4c38cc554ab73a50666f44378418f6870378edf1f5744cc992f4f60
+size 411507
last-checkpoint/global_step1650/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4ac1e6c793af99eadc9a824e76c6395926948aa5ed3496c3fcfaac1a0b73cca5
+size 411507
last-checkpoint/global_step1650/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:63a910d523c55d0b02759778eba3d3644d4af2bd51a9d3fcecee59a7b97a5399
+size 411507
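
The ADDED files above are the DeepSpeed ZeRO shards for step 1650: one bf16 optimizer-state file and one model-states file per data-parallel rank (ranks 0-3). A minimal inspection sketch, assuming PyTorch is installed and the shards have been pulled locally; the layout inside each shard depends on the DeepSpeed version, so this only prints whatever was saved:

import torch

# One of the pulled optimizer-state shards from this commit.
shard = "last-checkpoint/global_step1650/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt"
# Recent PyTorch releases may require weights_only=False for non-tensor payloads.
state = torch.load(shard, map_location="cpu")
if isinstance(state, dict):
    print(list(state.keys()))
else:
    print(type(state))

To rebuild a single consolidated fp32 state dict from shards like these, DeepSpeed ships a zero_to_fp32.py helper alongside its checkpoints; its exact invocation varies by version, so check the script's --help.
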
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step1600
+global_step1650
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:9279ed4b01716237e789d2631c1f29bc5d43c5633c014d4401de21b672c1b355
+oid sha256:a90384755f5b036b42b1a465b39dbf24a925a02c04294f9d684bc1de7f4db1e5
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:ca1990d68e57c70df5c56d395dd3f3befbe07b380521f4144677c20f6fe2a3eb
+oid sha256:7621b41e55056218f97d5b32ae116de3304a677b9f27b6a62170d83a2bbff176
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:e0790066885525e1b9a9390a40ae27abd57abb47f031abface27890732f9e684
+oid sha256:997e9debadfd125b5c8b66ee6dd79ced3d40d353ff9250475f3814fd950012a6
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1325a2034fe48ebad4f00ac8a2b32ab5c4c43c2497712169a8e3b1112363d916
+oid sha256:0f0256032419959580948d742425f66782bc8eb029126a091669a42c6ee0eba4
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:f2487a6c511ed8055eb0842d87966b09ae8b62c1b4514727282ca413d6e9c4e2
+oid sha256:a67704d17501733446a3916f5d09739e8293f473d2f1a5f63158f87200fdaa72
 size 1064
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6319106221199036,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-  "epoch": 0.8241050733968581,
+  "epoch": 0.8498583569405099,
   "eval_steps": 50,
-  "global_step": 1600,
+  "global_step": 1650,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -2855,11 +2855,100 @@
       "eval_steps_per_second": 0.936,
       "num_input_tokens_seen": 18714072,
       "step": 1600
+    },
+    {
+      "epoch": 0.8266804017512233,
+      "grad_norm": 3.4115030121534953,
+      "learning_rate": 5.8709014638958404e-05,
+      "loss": 0.6095,
+      "num_input_tokens_seen": 18772552,
+      "step": 1605
+    },
+    {
+      "epoch": 0.8292557301055885,
+      "grad_norm": 2.8584050529867895,
+      "learning_rate": 5.846947222863123e-05,
+      "loss": 0.5896,
+      "num_input_tokens_seen": 18830992,
+      "step": 1610
+    },
+    {
+      "epoch": 0.8318310584599536,
+      "grad_norm": 3.083134826868609,
+      "learning_rate": 5.8229729514036705e-05,
+      "loss": 0.545,
+      "num_input_tokens_seen": 18889480,
+      "step": 1615
+    },
+    {
+      "epoch": 0.8344063868143188,
+      "grad_norm": 3.5650772646006703,
+      "learning_rate": 5.7989792165125356e-05,
+      "loss": 0.6021,
+      "num_input_tokens_seen": 18947936,
+      "step": 1620
+    },
+    {
+      "epoch": 0.836981715168684,
+      "grad_norm": 3.1787537764025737,
+      "learning_rate": 5.774966585645092e-05,
+      "loss": 0.5741,
+      "num_input_tokens_seen": 19006432,
+      "step": 1625
+    },
+    {
+      "epoch": 0.8395570435230492,
+      "grad_norm": 4.505205596087594,
+      "learning_rate": 5.7509356267035975e-05,
+      "loss": 0.5796,
+      "num_input_tokens_seen": 19064920,
+      "step": 1630
+    },
+    {
+      "epoch": 0.8421323718774144,
+      "grad_norm": 3.854433226263906,
+      "learning_rate": 5.726886908023776e-05,
+      "loss": 0.5088,
+      "num_input_tokens_seen": 19123376,
+      "step": 1635
+    },
+    {
+      "epoch": 0.8447077002317795,
+      "grad_norm": 3.5910960304247643,
+      "learning_rate": 5.702820998361373e-05,
+      "loss": 0.5431,
+      "num_input_tokens_seen": 19181864,
+      "step": 1640
+    },
+    {
+      "epoch": 0.8472830285861447,
+      "grad_norm": 4.55639282269759,
+      "learning_rate": 5.6787384668786994e-05,
+      "loss": 0.5849,
+      "num_input_tokens_seen": 19240352,
+      "step": 1645
+    },
+    {
+      "epoch": 0.8498583569405099,
+      "grad_norm": 4.031478721616991,
+      "learning_rate": 5.654639883131178e-05,
+      "loss": 0.5668,
+      "num_input_tokens_seen": 19298848,
+      "step": 1650
+    },
+    {
+      "epoch": 0.8498583569405099,
+      "eval_loss": 0.6634677648544312,
+      "eval_runtime": 16.0267,
+      "eval_samples_per_second": 3.744,
+      "eval_steps_per_second": 0.936,
+      "num_input_tokens_seen": 19298848,
+      "step": 1650
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 18714072,
+  "num_input_tokens_seen": 19298848,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -2874,7 +2963,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1050836028358656.0,
+  "total_flos": 1083674466975744.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null