ben81828 committed
Commit c02218d · verified · 1 parent: 15a3eaa

Training in progress, step 3050, checkpoint

last-checkpoint/adapter_model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:c0c6af243f589a55fa9a3c1d1d73c19fe4b53c2830aa75e5338a55fa38ab436d
+oid sha256:ddb314047f6c123f2abaf4b7b141070b2299dee4fc65b9c0ec7277494419776e
 size 18516456
last-checkpoint/global_step3049/bf16_zero_pp_rank_0_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:7f5f01fb31fdf05d1e57447dec005c951bccf5c68312950d9ba68ed2b72acc3f
+size 27700976
last-checkpoint/global_step3049/bf16_zero_pp_rank_1_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:57fb41b9d59335b24ba9718ac6c9fd7127282cbbe0d7b4b41a3649591408f50b
+size 27700976
last-checkpoint/global_step3049/bf16_zero_pp_rank_2_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e952f98dd8858eb80ad86f5a924c0ad5ab72100eb02d784dc6a1b68d87f21e06
+size 27700976
last-checkpoint/global_step3049/bf16_zero_pp_rank_3_mp_rank_00_optim_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:a5e81e920bc6cb45f20660b91acbcade620837849e36b76142db5dac6cd1f9cb
+size 27700976
last-checkpoint/global_step3049/zero_pp_rank_0_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8471330bf5a8c63f2b89d46daf4f4aaad340a671bd43e8f5bc9e07681597db2f
+size 411571
last-checkpoint/global_step3049/zero_pp_rank_1_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:76adf22263ee0fa2a57671c2d94a26406cb5e23e4e71ca187da62197af0a3f48
+size 411507
last-checkpoint/global_step3049/zero_pp_rank_2_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f2ece78d03d8ad6a00affb15a09e2fd2c6e6f387952a1aa180bc85011e19774a
+size 411507
last-checkpoint/global_step3049/zero_pp_rank_3_mp_rank_00_model_states.pt ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:dccca4b860945d3665f2eca85ecde9be91b246c79f8a168bc5072e8f5c567d74
+size 411507
last-checkpoint/latest CHANGED
@@ -1 +1 @@
-global_step2999
+global_step3049
last-checkpoint/rng_state_0.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6f30f7f3c3620ccd30020e1ea4b81d1a56ee511b742c91370577d55399c14412
+oid sha256:381f090b12cbb3fcce976bd2e72d07a7786154c2848cc881d75715648e7c4dc2
 size 15024
last-checkpoint/rng_state_1.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:440cc1f8e138e1a90606722bd350460b4460991a2f3671f46f880f5743522dca
+oid sha256:4321dc1beb6c245d4a16a8fb2bed2ce2a40d89e5a9c611c4572f63a09523846c
 size 15024
last-checkpoint/rng_state_2.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d78d5df466f5b34e85649b90825d4b168464f6d49c668313415473184409b799
+oid sha256:5b79020117e301cd96518c9d1e3eb43a609bc85799f14bac63d9c572fc04cb89
 size 15024
last-checkpoint/rng_state_3.pth CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:6d8be6df9e66c8bba407df289d2aa6b4422668727a1e2419037de032213393a4
+oid sha256:5429623c7323b4a820ea3d76194bc456c2affeeb1f3af8978aec5aee11b2d1ef
 size 15024
last-checkpoint/scheduler.pt CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d143caf68fd1aa97e1a4c3e95f828909a9de86a4e283fcdef513f7e5c5c180e5
+oid sha256:65132092aa084390575a45bea99d0dcabb8005a8fae760edf38c6251571f2afa
 size 1064
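
Every entry above except last-checkpoint/latest is a Git LFS pointer: the repository records only a sha256 oid and a byte size, while the actual weights and optimizer states live in LFS storage. As an illustration only (this code is not part of the commit, and the paths in the usage comment are hypothetical), the sketch below parses such a pointer and checks a locally fetched blob against it:

```python
# Sketch only: verify a local blob against the oid/size recorded in a Git LFS
# pointer file like the ones shown in this diff.
import hashlib
from pathlib import Path


def read_pointer(pointer_path: Path) -> dict:
    """Parse a Git LFS pointer file into a dict of its key/value fields."""
    fields = {}
    for line in pointer_path.read_text().splitlines():
        key, _, value = line.partition(" ")
        if key:
            fields[key] = value
    return fields


def verify_blob(pointer_path: Path, blob_path: Path) -> bool:
    """Return True if blob_path matches the pointer's sha256 oid and size."""
    fields = read_pointer(pointer_path)
    expected_oid = fields["oid"].split(":", 1)[1]  # drop the "sha256:" prefix
    expected_size = int(fields["size"])
    data = blob_path.read_bytes()
    return hashlib.sha256(data).hexdigest() == expected_oid and len(data) == expected_size


# Hypothetical usage (these local paths are not shipped with the commit):
# verify_blob(Path("adapter_model.pointer"), Path("adapter_model.safetensors"))
```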
last-checkpoint/trainer_state.json CHANGED
@@ -1,9 +1,9 @@
 {
   "best_metric": 0.6319106221199036,
   "best_model_checkpoint": "saves/CADICA_qwenvl_stenosis_classily_scale4_frozenVision/lora/sft/checkpoint-1600",
-  "epoch": 1.5449394797836724,
+  "epoch": 1.5706927633273242,
   "eval_steps": 50,
-  "global_step": 3000,
+  "global_step": 3050,
   "is_hyper_param_search": false,
   "is_local_process_zero": true,
   "is_world_process_zero": true,
@@ -5347,11 +5347,100 @@
       "eval_steps_per_second": 0.925,
       "num_input_tokens_seen": 35082056,
       "step": 3000
+    },
+    {
+      "epoch": 1.5475148081380374,
+      "grad_norm": 5.643348291984009,
+      "learning_rate": 3.644856725057405e-06,
+      "loss": 0.2157,
+      "num_input_tokens_seen": 35140568,
+      "step": 3005
+    },
+    {
+      "epoch": 1.5500901364924027,
+      "grad_norm": 6.225693907549098,
+      "learning_rate": 3.554268172739661e-06,
+      "loss": 0.2233,
+      "num_input_tokens_seen": 35199064,
+      "step": 3010
+    },
+    {
+      "epoch": 1.552665464846768,
+      "grad_norm": 5.080945994557626,
+      "learning_rate": 3.4647780688298826e-06,
+      "loss": 0.2951,
+      "num_input_tokens_seen": 35257576,
+      "step": 3015
+    },
+    {
+      "epoch": 1.5552407932011332,
+      "grad_norm": 5.263879934995459,
+      "learning_rate": 3.376388529782215e-06,
+      "loss": 0.2274,
+      "num_input_tokens_seen": 35316064,
+      "step": 3020
+    },
+    {
+      "epoch": 1.5578161215554984,
+      "grad_norm": 5.655349471422181,
+      "learning_rate": 3.2891016460222967e-06,
+      "loss": 0.2479,
+      "num_input_tokens_seen": 35374504,
+      "step": 3025
+    },
+    {
+      "epoch": 1.5603914499098637,
+      "grad_norm": 7.871895425892081,
+      "learning_rate": 3.2029194818977983e-06,
+      "loss": 0.292,
+      "num_input_tokens_seen": 35432984,
+      "step": 3030
+    },
+    {
+      "epoch": 1.5629667782642287,
+      "grad_norm": 6.441418084723481,
+      "learning_rate": 3.117844075629617e-06,
+      "loss": 0.241,
+      "num_input_tokens_seen": 35491488,
+      "step": 3035
+    },
+    {
+      "epoch": 1.5655421066185937,
+      "grad_norm": 5.268339109046189,
+      "learning_rate": 3.033877439263666e-06,
+      "loss": 0.228,
+      "num_input_tokens_seen": 35549984,
+      "step": 3040
+    },
+    {
+      "epoch": 1.568117434972959,
+      "grad_norm": 7.110464304213341,
+      "learning_rate": 2.951021558623274e-06,
+      "loss": 0.2485,
+      "num_input_tokens_seen": 35608488,
+      "step": 3045
+    },
+    {
+      "epoch": 1.5706927633273242,
+      "grad_norm": 12.567694093056492,
+      "learning_rate": 2.869278393262226e-06,
+      "loss": 0.2851,
+      "num_input_tokens_seen": 35666976,
+      "step": 3050
+    },
+    {
+      "epoch": 1.5706927633273242,
+      "eval_loss": 0.8473746180534363,
+      "eval_runtime": 16.0314,
+      "eval_samples_per_second": 3.743,
+      "eval_steps_per_second": 0.936,
+      "num_input_tokens_seen": 35666976,
+      "step": 3050
     }
   ],
   "logging_steps": 5,
   "max_steps": 3400,
-  "num_input_tokens_seen": 35082056,
+  "num_input_tokens_seen": 35666976,
   "num_train_epochs": 2,
   "save_steps": 50,
   "stateful_callbacks": {
@@ -5366,7 +5455,7 @@
       "attributes": {}
     }
   },
-  "total_flos": 1970024459010048.0,
+  "total_flos": 2002870983983104.0,
   "train_batch_size": 1,
   "trial_name": null,
   "trial_params": null