kevinoli committed
Commit 1a8c846
1 Parent(s): f6b6e0a

Training in progress, step 2500, checkpoint

checkpoint-2500/config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "openai/clip-vit-large-patch14-336",
+   "architectures": [
+     "CLIPModel"
+   ],
+   "initializer_factor": 1.0,
+   "logit_scale_init_value": 2.6592,
+   "model_type": "clip",
+   "projection_dim": 768,
+   "text_config": {
+     "dropout": 0.0,
+     "hidden_size": 768,
+     "intermediate_size": 3072,
+     "model_type": "clip_text_model",
+     "num_attention_heads": 12,
+     "projection_dim": 768
+   },
+   "torch_dtype": "float32",
+   "transformers_version": "4.45.0.dev0",
+   "vision_config": {
+     "dropout": 0.0,
+     "hidden_size": 1024,
+     "image_size": 336,
+     "intermediate_size": 4096,
+     "model_type": "clip_vision_model",
+     "num_attention_heads": 16,
+     "num_hidden_layers": 24,
+     "patch_size": 14,
+     "projection_dim": 768
+   }
+ }
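The config mirrors the base openai/clip-vit-large-patch14-336 architecture: a ViT-L/14 vision tower at 336 px input resolution with a 768-dim projection. As a minimal sketch (not part of this commit), the checkpoint can be loaded with the standard Transformers API once the checkpoint-2500 directory is downloaded locally; the processor is taken from the base model, since this commit does not include one:

# Minimal sketch (not part of this commit): load the fine-tuned checkpoint.
from transformers import CLIPModel, CLIPProcessor

model = CLIPModel.from_pretrained("checkpoint-2500")  # reads config.json + model.safetensors
processor = CLIPProcessor.from_pretrained("openai/clip-vit-large-patch14-336")

print(model.config.projection_dim)            # 768
print(model.config.vision_config.image_size)  # 336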
checkpoint-2500/model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:98ec3b2a6dd3771786ae2b0d7249894cb75f3ce76f2b67770f873a3c341897d7
+ size 1711848436
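model.safetensors is stored through Git LFS, so the commit only records the pointer above (SHA-256 and byte size). A generic sketch, assuming the real file has been pulled locally, for checking a download against the pointer:

# Sketch: verify a downloaded LFS object against the pointer's oid and size.
import hashlib, os

path = "checkpoint-2500/model.safetensors"
expected_oid = "98ec3b2a6dd3771786ae2b0d7249894cb75f3ce76f2b67770f873a3c341897d7"
expected_size = 1711848436

sha = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):
        sha.update(chunk)

assert os.path.getsize(path) == expected_size, "size mismatch"
assert sha.hexdigest() == expected_oid, "hash mismatch"

The optimizer, scheduler, and RNG-state files below use the same pointer format.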
checkpoint-2500/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:93207b35f2dedfea609106b703ca54f5cb4ac231873997fec348e7de8276155a
+ size 3424043887
checkpoint-2500/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cd0cc501af0df1601264672dbfc6cf6040fc01ccafb00b41ccca22f726fbadf9
+ size 14503
checkpoint-2500/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:38cc1d728ee7055a8f73f44454f77a6c426f477f419257cb327eda4a0d247ef3
+ size 623
checkpoint-2500/trainer_state.json ADDED
@@ -0,0 +1,108 @@
+ {
+   "best_metric": 1.4585391283035278,
+   "best_model_checkpoint": "./output/clip-finetuned-csu-p14-336-e4l59-l/checkpoint-2500",
+   "epoch": 0.46057479734708917,
+   "eval_steps": 500,
+   "global_step": 2500,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.09211495946941783,
+       "grad_norm": 20.952665328979492,
+       "learning_rate": 4.884856300663228e-09,
+       "loss": 0.3952,
+       "step": 500
+     },
+     {
+       "epoch": 0.09211495946941783,
+       "eval_loss": 1.4940427541732788,
+       "eval_runtime": 73.9282,
+       "eval_samples_per_second": 16.327,
+       "eval_steps_per_second": 2.043,
+       "step": 500
+     },
+     {
+       "epoch": 0.18422991893883567,
+       "grad_norm": 59.28666687011719,
+       "learning_rate": 4.769712601326456e-09,
+       "loss": 0.4562,
+       "step": 1000
+     },
+     {
+       "epoch": 0.18422991893883567,
+       "eval_loss": 1.4853259325027466,
+       "eval_runtime": 74.6533,
+       "eval_samples_per_second": 16.168,
+       "eval_steps_per_second": 2.023,
+       "step": 1000
+     },
+     {
+       "epoch": 0.2763448784082535,
+       "grad_norm": 1.179814338684082,
+       "learning_rate": 4.654568901989683e-09,
+       "loss": 0.5131,
+       "step": 1500
+     },
+     {
+       "epoch": 0.2763448784082535,
+       "eval_loss": 1.4757832288742065,
+       "eval_runtime": 76.2866,
+       "eval_samples_per_second": 15.822,
+       "eval_steps_per_second": 1.979,
+       "step": 1500
+     },
+     {
+       "epoch": 0.36845983787767134,
+       "grad_norm": 0.900991678237915,
+       "learning_rate": 4.5394252026529105e-09,
+       "loss": 0.4481,
+       "step": 2000
+     },
+     {
+       "epoch": 0.36845983787767134,
+       "eval_loss": 1.4675536155700684,
+       "eval_runtime": 75.9348,
+       "eval_samples_per_second": 15.895,
+       "eval_steps_per_second": 1.989,
+       "step": 2000
+     },
+     {
+       "epoch": 0.46057479734708917,
+       "grad_norm": 346.93853759765625,
+       "learning_rate": 4.424281503316139e-09,
+       "loss": 0.4839,
+       "step": 2500
+     },
+     {
+       "epoch": 0.46057479734708917,
+       "eval_loss": 1.4585391283035278,
+       "eval_runtime": 76.7708,
+       "eval_samples_per_second": 15.722,
+       "eval_steps_per_second": 1.967,
+       "step": 2500
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 21712,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 4,
+   "save_steps": 500,
+   "stateful_callbacks": {
+     "TrainerControl": {
+       "args": {
+         "should_epoch_stop": false,
+         "should_evaluate": false,
+         "should_log": false,
+         "should_save": true,
+         "should_training_stop": false
+       },
+       "attributes": {}
+     }
+   },
+   "total_flos": 900115394852520.0,
+   "train_batch_size": 2,
+   "trial_name": null,
+   "trial_params": null
+ }
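trainer_state.json is the Trainer's bookkeeping for this run: 2,500 of 21,712 steps (about 0.46 of 4 epochs) are done, evaluation runs every 500 steps, and eval_loss has moved from 1.4940 at step 500 to the current best of 1.4585 at step 2500. A small sketch, assuming the file sits in a local checkpoint-2500 directory, for printing the evaluation curve:

# Sketch: read the evaluation-loss curve out of trainer_state.json.
import json

with open("checkpoint-2500/trainer_state.json") as f:
    state = json.load(f)

for entry in state["log_history"]:
    if "eval_loss" in entry:
        print(f"step {entry['step']:>5}: eval_loss = {entry['eval_loss']:.4f}")

To continue the run, Trainer.train(resume_from_checkpoint="checkpoint-2500") restores the optimizer, scheduler, and RNG state saved alongside this file.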
checkpoint-2500/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:17dd82d53ab261b7081e3e93503b374d907fea92ca3e505d0ff1319c5765d331
+ size 4847
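training_args.bin is a pickled TrainingArguments object rather than a tensor file, so it is opened with torch.load. A sketch, assuming a recent PyTorch where weights_only defaults to True and must be disabled for pickled objects:

# Sketch: inspect the TrainingArguments pickled into training_args.bin.
import torch

args = torch.load("checkpoint-2500/training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.per_device_train_batch_size)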