JorgeGIT committed
Commit e52a769
1 Parent(s): ca5c20f

Model save

README.md CHANGED
@@ -22,7 +22,7 @@ model-index:
     metrics:
     - name: Accuracy
       type: accuracy
-      value: 0.9548872180451128
+      value: 0.9774436090225563
 ---
 
 <!-- This model card has been generated automatically according to the information the Trainer had access to. You
@@ -32,8 +32,8 @@ should probably proofread and complete it, then remove this comment. -->
 
 This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
 It achieves the following results on the evaluation set:
-- Loss: 0.1823
-- Accuracy: 0.9549
+- Loss: 0.0870
+- Accuracy: 0.9774
 
 ## Model description
 
@@ -53,20 +53,21 @@ More information needed
 
 The following hyperparameters were used during training:
 - learning_rate: 0.0002
-- train_batch_size: 16
+- train_batch_size: 32
 - eval_batch_size: 8
 - seed: 42
 - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
 - lr_scheduler_type: linear
-- num_epochs: 4
+- num_epochs: 10
 - mixed_precision_training: Native AMP
 
 ### Training results
 
 | Training Loss | Epoch | Step | Validation Loss | Accuracy |
 |:-------------:|:-----:|:----:|:---------------:|:--------:|
-| 0.4862 | 1.49 | 100 | 0.4092 | 0.9173 |
-| 0.2095 | 2.99 | 200 | 0.1823 | 0.9549 |
+| 0.1606 | 2.94 | 100 | 0.1829 | 0.9511 |
+| 0.1895 | 5.88 | 200 | 0.1441 | 0.9662 |
+| 0.0423 | 8.82 | 300 | 0.0870 | 0.9774 |
 
 
 ### Framework versions
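For reference, the updated hyperparameter block above maps onto a `transformers.TrainingArguments` configuration along these lines. This is a minimal sketch, not code from the repository: only the hyperparameter values come from the diff, while the `output_dir` is taken from the checkpoint path in `trainer_state.json` and the eval/logging/save intervals are assumed to match the ones recorded there.

```python
from transformers import TrainingArguments

# Hypothetical reconstruction of the training configuration listed in the
# updated README. Values not shown in the commit are assumptions.
training_args = TrainingArguments(
    output_dir="finetuned-Leukemia-cell",
    learning_rate=2e-4,
    per_device_train_batch_size=32,
    per_device_eval_batch_size=8,
    num_train_epochs=10,
    seed=42,
    lr_scheduler_type="linear",   # linear decay to 0, as in the README
    fp16=True,                    # "Native AMP" mixed precision
    evaluation_strategy="steps",  # assumed: evaluate every 100 steps
    eval_steps=100,
    logging_steps=10,
    save_steps=100,
)
```

The default optimizer (AdamW with betas (0.9, 0.999) and epsilon 1e-08) already matches the optimizer line in the README, so it needs no explicit argument.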
all_results.json ADDED
@@ -0,0 +1,13 @@
+{
+    "epoch": 4.0,
+    "eval_accuracy": 0.9548872180451128,
+    "eval_loss": 0.18233025074005127,
+    "eval_runtime": 2.6375,
+    "eval_samples_per_second": 100.853,
+    "eval_steps_per_second": 12.891,
+    "total_flos": 3.292007238315049e+17,
+    "train_loss": 0.5091555158593761,
+    "train_runtime": 958.3438,
+    "train_samples_per_second": 4.433,
+    "train_steps_per_second": 0.28
+}
eval_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 4.0,
+    "eval_accuracy": 0.9548872180451128,
+    "eval_loss": 0.18233025074005127,
+    "eval_runtime": 2.6375,
+    "eval_samples_per_second": 100.853,
+    "eval_steps_per_second": 12.891
+}
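A quick consistency check on these numbers (a hypothetical derivation, assuming `eval_samples_per_second` is simply the number of evaluation samples divided by `eval_runtime`): the run evaluated roughly 266 images, and the reported accuracy corresponds to 254 of them being classified correctly.

```python
# Back-of-the-envelope check on eval_results.json (derived, not part of the repo).
num_eval_samples = round(100.853 * 2.6375)  # ~266 images
print(num_eval_samples)                     # 266
print(254 / 266)                            # 0.9548872180451128, the reported eval_accuracy
```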
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:5a995b7567cdb05dfd9c3fa30e3aa0d7d8620e2d8c6b3206adc2af55a77badef
+oid sha256:86d8fbbe5587bcb92891682aa722012947d3377ee2016b310819dd2600bc17ef
 size 343239356
runs/Nov30_22-57-39_6c26dad0aabb/events.out.tfevents.1701386629.6c26dad0aabb.7066.2 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:bea73d0d4adbbe9129f6586d6751a267009f6fa8865d7c9a5b0cc9cb9ad6dd30
+size 411
runs/Nov30_23-24-53_6c26dad0aabb/events.out.tfevents.1701386704.6c26dad0aabb.7066.3 ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:602b22072b791babf9ea719e668e129769682f42f6f62da6b1287ad798a2aa10
+size 11146
train_results.json ADDED
@@ -0,0 +1,8 @@
+{
+    "epoch": 4.0,
+    "total_flos": 3.292007238315049e+17,
+    "train_loss": 0.5091555158593761,
+    "train_runtime": 958.3438,
+    "train_samples_per_second": 4.433,
+    "train_steps_per_second": 0.28
+}
trainer_state.json ADDED
@@ -0,0 +1,202 @@
+{
+  "best_metric": 0.18233025074005127,
+  "best_model_checkpoint": "finetuned-Leukemia-cell/checkpoint-200",
+  "epoch": 4.0,
+  "eval_steps": 100,
+  "global_step": 268,
+  "is_hyper_param_search": false,
+  "is_local_process_zero": true,
+  "is_world_process_zero": true,
+  "log_history": [
+    {
+      "epoch": 0.15,
+      "learning_rate": 0.00019253731343283584,
+      "loss": 1.7951,
+      "step": 10
+    },
+    {
+      "epoch": 0.3,
+      "learning_rate": 0.00018507462686567165,
+      "loss": 1.3572,
+      "step": 20
+    },
+    {
+      "epoch": 0.45,
+      "learning_rate": 0.00017761194029850748,
+      "loss": 0.98,
+      "step": 30
+    },
+    {
+      "epoch": 0.6,
+      "learning_rate": 0.00017014925373134328,
+      "loss": 0.9394,
+      "step": 40
+    },
+    {
+      "epoch": 0.75,
+      "learning_rate": 0.00016268656716417911,
+      "loss": 0.7973,
+      "step": 50
+    },
+    {
+      "epoch": 0.9,
+      "learning_rate": 0.00015522388059701495,
+      "loss": 0.8591,
+      "step": 60
+    },
+    {
+      "epoch": 1.04,
+      "learning_rate": 0.00014776119402985075,
+      "loss": 0.6357,
+      "step": 70
+    },
+    {
+      "epoch": 1.19,
+      "learning_rate": 0.00014029850746268658,
+      "loss": 0.6522,
+      "step": 80
+    },
+    {
+      "epoch": 1.34,
+      "learning_rate": 0.0001328358208955224,
+      "loss": 0.495,
+      "step": 90
+    },
+    {
+      "epoch": 1.49,
+      "learning_rate": 0.00012537313432835822,
+      "loss": 0.4862,
+      "step": 100
+    },
+    {
+      "epoch": 1.49,
+      "eval_accuracy": 0.9172932330827067,
+      "eval_loss": 0.4092223644256592,
+      "eval_runtime": 173.193,
+      "eval_samples_per_second": 1.536,
+      "eval_steps_per_second": 0.196,
+      "step": 100
+    },
+    {
+      "epoch": 1.64,
+      "learning_rate": 0.00011791044776119405,
+      "loss": 0.5047,
+      "step": 110
+    },
+    {
+      "epoch": 1.79,
+      "learning_rate": 0.00011044776119402987,
+      "loss": 0.3878,
+      "step": 120
+    },
+    {
+      "epoch": 1.94,
+      "learning_rate": 0.00010298507462686569,
+      "loss": 0.43,
+      "step": 130
+    },
+    {
+      "epoch": 2.09,
+      "learning_rate": 9.552238805970149e-05,
+      "loss": 0.3441,
+      "step": 140
+    },
+    {
+      "epoch": 2.24,
+      "learning_rate": 8.805970149253732e-05,
+      "loss": 0.3271,
+      "step": 150
+    },
+    {
+      "epoch": 2.39,
+      "learning_rate": 8.059701492537314e-05,
+      "loss": 0.3415,
+      "step": 160
+    },
+    {
+      "epoch": 2.54,
+      "learning_rate": 7.313432835820896e-05,
+      "loss": 0.293,
+      "step": 170
+    },
+    {
+      "epoch": 2.69,
+      "learning_rate": 6.567164179104478e-05,
+      "loss": 0.2864,
+      "step": 180
+    },
+    {
+      "epoch": 2.84,
+      "learning_rate": 5.82089552238806e-05,
+      "loss": 0.2873,
+      "step": 190
+    },
+    {
+      "epoch": 2.99,
+      "learning_rate": 5.074626865671642e-05,
+      "loss": 0.2095,
+      "step": 200
+    },
+    {
+      "epoch": 2.99,
+      "eval_accuracy": 0.9548872180451128,
+      "eval_loss": 0.18233025074005127,
+      "eval_runtime": 2.578,
+      "eval_samples_per_second": 103.183,
+      "eval_steps_per_second": 13.189,
+      "step": 200
+    },
+    {
+      "epoch": 3.13,
+      "learning_rate": 4.328358208955224e-05,
+      "loss": 0.1822,
+      "step": 210
+    },
+    {
+      "epoch": 3.28,
+      "learning_rate": 3.582089552238806e-05,
+      "loss": 0.228,
+      "step": 220
+    },
+    {
+      "epoch": 3.43,
+      "learning_rate": 2.835820895522388e-05,
+      "loss": 0.1377,
+      "step": 230
+    },
+    {
+      "epoch": 3.58,
+      "learning_rate": 2.0895522388059702e-05,
+      "loss": 0.2099,
+      "step": 240
+    },
+    {
+      "epoch": 3.73,
+      "learning_rate": 1.3432835820895523e-05,
+      "loss": 0.1576,
+      "step": 250
+    },
+    {
+      "epoch": 3.88,
+      "learning_rate": 5.970149253731343e-06,
+      "loss": 0.1394,
+      "step": 260
+    },
+    {
+      "epoch": 4.0,
+      "step": 268,
+      "total_flos": 3.292007238315049e+17,
+      "train_loss": 0.5091555158593761,
+      "train_runtime": 958.3438,
+      "train_samples_per_second": 4.433,
+      "train_steps_per_second": 0.28
+    }
+  ],
+  "logging_steps": 10,
+  "max_steps": 268,
+  "num_train_epochs": 4,
+  "save_steps": 100,
+  "total_flos": 3.292007238315049e+17,
+  "trial_name": null,
+  "trial_params": null
+}
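The `learning_rate` values in `log_history` are consistent with the `linear` scheduler declared in the README: a straight-line decay from the initial 2e-4 down to zero over `max_steps = 268`. A minimal sketch of that check (the helper below is illustrative, not part of the repository):

```python
# Linear LR schedule implied by trainer_state.json: lr(step) = lr0 * (1 - step / max_steps)
lr0, max_steps = 2e-4, 268

def linear_lr(step: int) -> float:
    return lr0 * (1 - step / max_steps)

print(linear_lr(10))   # ~1.9254e-04, matching the logged 0.00019253731343283584
print(linear_lr(100))  # ~1.2537e-04, matching the logged 0.00012537313432835822
print(linear_lr(260))  # ~5.9701e-06, matching the logged 5.970149253731343e-06
```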
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:98f8a3a2f49f3b298aa444fc183b1fa523ebc06d66cf841ca5da0708719ef5cb
+oid sha256:945f141fbeb29ad39b8605b232d98a21f0507a88e5e0eb935c00a2a7f5a4a921
 size 4600