ppak10 committed on
Commit
bc5af70
1 Parent(s): 10eae1c

test_ViT-Masked_4

Files changed (5)
  1. README.md +50 -0
  2. config.json +22 -0
  3. model.safetensors +3 -0
  4. trainer_state.json +100 -0
  5. training_args.bin +3 -0
README.md ADDED
@@ -0,0 +1,50 @@
+ ---
+ tags:
+ - generated_from_trainer
+ model-index:
+ - name: test_ViT-Masked_4
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # test_ViT-Masked_4
+
+ This model is a fine-tuned version of [](https://huggingface.co/) on the ppak10/Melt-Pool-Thermal-Images dataset.
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 1e-05
+ - train_batch_size: 2048
+ - eval_batch_size: 16
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 10
+
+ ### Training results
+
+
+
+ ### Framework versions
+
+ - Transformers 4.40.1
+ - Pytorch 2.0.1+cu117
+ - Datasets 2.19.0
+ - Tokenizers 0.19.1
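
The hyperparameters listed in the model card above map onto a standard `transformers` Trainer configuration. Below is a minimal sketch; anything not stated in the card (output directory, weight decay, warmup, or how many devices stand behind the reported batch size of 2048) is an assumption, and the exact values used are serialized in `training_args.bin` further down.

```python
from transformers import TrainingArguments

# Sketch reconstructing the hyperparameters from the model card.
# Unlisted settings (output_dir, weight decay, warmup, fp16, ...) are assumptions;
# the authoritative values live in training_args.bin.
training_args = TrainingArguments(
    output_dir="test_ViT-Masked_4",     # assumed output directory
    learning_rate=1e-5,
    per_device_train_batch_size=2048,   # reported train_batch_size (may span multiple GPUs)
    per_device_eval_batch_size=16,
    seed=42,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    lr_scheduler_type="linear",
    num_train_epochs=10,
    logging_steps=500,                  # matches trainer_state.json
    save_steps=100,                     # matches trainer_state.json
)
```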
config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "architectures": [
+     "ViTForMaskedImageModeling"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 8,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "image_size": 64,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 1,
+   "num_hidden_layers": 12,
+   "patch_size": 8,
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.1"
+ }
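
The configuration above is a ViT-Base encoder adapted for single-channel 64×64 inputs split into 8×8 patches, wrapped in `ViTForMaskedImageModeling` so masked patches are reconstructed through a pixel-shuffle decoder (`encoder_stride` equal to `patch_size`). A minimal sketch of instantiating the same architecture and running a dummy forward pass follows; the random input and mask are purely illustrative, and the trained weights would instead be loaded with `from_pretrained`.

```python
import torch
from transformers import ViTConfig, ViTForMaskedImageModeling

# Rebuild the architecture described in config.json (randomly initialized).
config = ViTConfig(
    image_size=64,        # 64x64 thermal frames
    patch_size=8,         # 8x8 patches -> (64/8)^2 = 64 patch tokens
    num_channels=1,       # single-channel input
    encoder_stride=8,     # upsampling factor of the reconstruction decoder
    hidden_size=768,
    num_hidden_layers=12,
    num_attention_heads=12,
    intermediate_size=3072,
)
model = ViTForMaskedImageModeling(config)

# Dummy forward pass: mask a random subset of the 64 patches.
pixel_values = torch.randn(1, 1, 64, 64)
num_patches = (config.image_size // config.patch_size) ** 2
bool_masked_pos = torch.randint(0, 2, (1, num_patches)).bool()
outputs = model(pixel_values=pixel_values, bool_masked_pos=bool_masked_pos)
print(outputs.reconstruction.shape)  # torch.Size([1, 1, 64, 64])
```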
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:a43adaced2939f3d15d7fe0393bbef9f2c97caa753575cfd13564ab423c9f892
+ size 340849656
trainer_state.json ADDED
@@ -0,0 +1,100 @@
+ {
+   "best_metric": null,
+   "best_model_checkpoint": null,
+   "epoch": 10.0,
+   "eval_steps": 500,
+   "global_step": 2590,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 1.9305019305019306,
+       "grad_norm": 3.1227238178253174,
+       "learning_rate": 8.06949806949807e-06,
+       "loss": 0.0861,
+       "step": 500
+     },
+     {
+       "epoch": 1.9305019305019306,
+       "eval_runtime": 661.7783,
+       "eval_samples_per_second": 266.734,
+       "eval_steps_per_second": 16.672,
+       "step": 500
+     },
+     {
+       "epoch": 3.861003861003861,
+       "grad_norm": 4.147549152374268,
+       "learning_rate": 6.13899613899614e-06,
+       "loss": 0.0554,
+       "step": 1000
+     },
+     {
+       "epoch": 3.861003861003861,
+       "eval_runtime": 639.2021,
+       "eval_samples_per_second": 276.155,
+       "eval_steps_per_second": 17.261,
+       "step": 1000
+     },
+     {
+       "epoch": 5.7915057915057915,
+       "grad_norm": 4.684039115905762,
+       "learning_rate": 4.208494208494209e-06,
+       "loss": 0.0428,
+       "step": 1500
+     },
+     {
+       "epoch": 5.7915057915057915,
+       "eval_runtime": 638.2108,
+       "eval_samples_per_second": 276.584,
+       "eval_steps_per_second": 17.287,
+       "step": 1500
+     },
+     {
+       "epoch": 7.722007722007722,
+       "grad_norm": 3.6964356899261475,
+       "learning_rate": 2.2779922779922782e-06,
+       "loss": 0.0351,
+       "step": 2000
+     },
+     {
+       "epoch": 7.722007722007722,
+       "eval_runtime": 637.2615,
+       "eval_samples_per_second": 276.996,
+       "eval_steps_per_second": 17.313,
+       "step": 2000
+     },
+     {
+       "epoch": 9.652509652509652,
+       "grad_norm": 0.3747519552707672,
+       "learning_rate": 3.474903474903475e-07,
+       "loss": 0.0314,
+       "step": 2500
+     },
+     {
+       "epoch": 9.652509652509652,
+       "eval_runtime": 643.7118,
+       "eval_samples_per_second": 274.221,
+       "eval_steps_per_second": 17.14,
+       "step": 2500
+     },
+     {
+       "epoch": 10.0,
+       "step": 2590,
+       "total_flos": 1.1089078720895386e+19,
+       "train_loss": 0.04947091428469507,
+       "train_runtime": 17619.1199,
+       "train_samples_per_second": 300.557,
+       "train_steps_per_second": 0.147
+     }
+   ],
+   "logging_steps": 500,
+   "max_steps": 2590,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 10,
+   "save_steps": 100,
+   "total_flos": 1.1089078720895386e+19,
+   "train_batch_size": 2048,
+   "trial_name": null,
+   "trial_params": null
+ }
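
`trainer_state.json` records the training loss every 500 steps, plus evaluation timings, in `log_history`. A small sketch, assuming the file has been downloaded locally, that pulls out the loss curve:

```python
import json

# Extract the (step, loss) pairs logged every 500 steps.
with open("trainer_state.json") as f:
    state = json.load(f)

loss_curve = [(e["step"], e["loss"]) for e in state["log_history"] if "loss" in e]
print(loss_curve)
# [(500, 0.0861), (1000, 0.0554), (1500, 0.0428), (2000, 0.0351), (2500, 0.0314)]
```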
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d06bd04dbd8729e94f967ee0cc3025883281f1257daa98f35a929899f79c84db
+ size 4539
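
`training_args.bin` is the pickled `TrainingArguments` object the Trainer writes next to its checkpoints. A minimal sketch for inspecting it, assuming a compatible `transformers` install is available so the pickle can resolve the class:

```python
import torch

# Unpickling requires transformers to be importable; on torch >= 2.6 the
# weights_only default changed, hence the explicit flag.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.num_train_epochs, args.lr_scheduler_type)
```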