omar22allam committed
Commit a9f0965
1 Parent(s): 5d49b58

Training Completed!
README.md ADDED
@@ -0,0 +1,77 @@
+ ---
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224
+ tags:
+ - generated_from_trainer
+ datasets:
+ - imagefolder
+ metrics:
+ - accuracy
+ model-index:
+ - name: MRI_vit
+   results:
+   - task:
+       name: Image Classification
+       type: image-classification
+     dataset:
+       name: imagefolder
+       type: imagefolder
+       config: default
+       split: test
+       args: default
+     metrics:
+     - name: Accuracy
+       type: accuracy
+       value: 0.9058823529411765
+ ---
+
+ <!-- This model card has been generated automatically according to the information the Trainer had access to. You
+ should probably proofread and complete it, then remove this comment. -->
+
+ # MRI_vit
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224](https://huggingface.co/google/vit-base-patch16-224) on the imagefolder dataset.
+ It achieves the following results on the evaluation set:
+ - Loss: 0.4389
+ - Accuracy: 0.9059
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - learning_rate: 0.0002
+ - train_batch_size: 16
+ - eval_batch_size: 8
+ - seed: 42
+ - optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
+ - lr_scheduler_type: linear
+ - num_epochs: 20
+
+ ### Training results
+
+ | Training Loss | Epoch   | Step | Validation Loss | Accuracy |
+ |:-------------:|:-------:|:----:|:---------------:|:--------:|
+ | 0.0101        | 5.5556  | 100  | 0.4389          | 0.9059   |
+ | 0.0001        | 11.1111 | 200  | 0.6572          | 0.8941   |
+ | 0.0001        | 16.6667 | 300  | 0.6680          | 0.9059   |
+
+
+ ### Framework versions
+
+ - Transformers 4.40.1
+ - Pytorch 2.3.0+cu121
+ - Datasets 2.19.1
+ - Tokenizers 0.19.1
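The card above leaves usage unspecified, so here is a minimal inference sketch. It assumes the checkpoint is published under the committer's namespace as `omar22allam/MRI_vit` and that a local scan image exists; both names are assumptions to adjust for your setup.

```python
# A minimal usage sketch, not the author's documented API:
# "omar22allam/MRI_vit" is an assumed repo id and "mri_scan.png"
# a hypothetical input file.
from transformers import pipeline

classifier = pipeline("image-classification", model="omar22allam/MRI_vit")
print(classifier("mri_scan.png"))
# e.g. [{'label': 'normal', 'score': 0.97}, ...]
```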
all_results.json ADDED
@@ -0,0 +1,13 @@
+ {
+     "epoch": 20.0,
+     "eval_accuracy": 0.9058823529411765,
+     "eval_loss": 0.43892163038253784,
+     "eval_runtime": 1.8997,
+     "eval_samples_per_second": 44.744,
+     "eval_steps_per_second": 5.79,
+     "total_flos": 4.417082996594688e+17,
+     "train_loss": 0.08463575366784223,
+     "train_runtime": 382.9626,
+     "train_samples_per_second": 14.884,
+     "train_steps_per_second": 0.94
+ }
config.json ADDED
@@ -0,0 +1,34 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "normal",
+     "1": "bengin",
+     "2": "cancer"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "bengin": 1,
+     "cancer": 2,
+     "normal": 0
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "problem_type": "single_label_classification",
+   "qkv_bias": true,
+   "torch_dtype": "float32",
+   "transformers_version": "4.40.1"
+ }
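The checkpoint carries three classes, with the spelling baked into the weights: `normal`, `bengin` (presumably intended as "benign"), and `cancer`. Below is a sketch of an explicit forward pass that maps logits back through `id2label`; the repo id and file name are the same assumptions as above.

```python
# A sketch of an explicit forward pass using the id2label mapping from
# config.json; "omar22allam/MRI_vit" and "mri_scan.png" are assumptions.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

model = ViTForImageClassification.from_pretrained("omar22allam/MRI_vit")
processor = ViTImageProcessor.from_pretrained("omar22allam/MRI_vit")

image = Image.open("mri_scan.png").convert("RGB")
inputs = processor(images=image, return_tensors="pt")
with torch.no_grad():
    logits = model(**inputs).logits  # shape (1, 3)
pred = logits.argmax(-1).item()
print(model.config.id2label[pred])  # "normal", "bengin" (sic), or "cancer"
```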
eval_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 20.0,
+     "eval_accuracy": 0.9058823529411765,
+     "eval_loss": 0.43892163038253784,
+     "eval_runtime": 1.8997,
+     "eval_samples_per_second": 44.744,
+     "eval_steps_per_second": 5.79
+ }
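The throughput figures let us back out the test-split size: `eval_runtime * eval_samples_per_second` comes to about 85 images, and the reported accuracy equals 77/85 exactly. A quick check:

```python
# Sanity-checking the metrics above: the eval split works out to ~85
# images, and eval_accuracy is consistent with 77 of 85 correct.
n_eval = round(1.8997 * 44.744)  # eval_runtime * eval_samples_per_second
print(n_eval)    # 85
print(77 / 85)   # 0.9058823529411765 == eval_accuracy
```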
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:d3c8ed14d2b63d5e9d77534be4a4b5c191d05ea1e5d42fa14887dd7255e26e6b
+ size 343227052
preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
+ {
+   "_valid_processor_keys": [
+     "images",
+     "do_resize",
+     "size",
+     "resample",
+     "do_rescale",
+     "rescale_factor",
+     "do_normalize",
+     "image_mean",
+     "image_std",
+     "return_tensors",
+     "data_format",
+     "input_data_format"
+   ],
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
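For reference, the settings above amount to: resize to 224x224 with bilinear resampling (`"resample": 2` is PIL's `Image.BILINEAR`), rescale pixel values by 1/255 (`rescale_factor`), then normalize each channel to mean 0.5 and std 0.5. A rough NumPy/PIL equivalent follows, as a sketch rather than a replacement for `ViTImageProcessor`:

```python
# A rough re-implementation of the ViTImageProcessor settings above;
# "mri_scan.png" is a hypothetical input file.
import numpy as np
from PIL import Image

image = Image.open("mri_scan.png").convert("RGB")
image = image.resize((224, 224), resample=Image.BILINEAR)  # "resample": 2
pixels = np.asarray(image).astype(np.float32) / 255.0      # rescale_factor
pixels = (pixels - 0.5) / 0.5                              # image_mean / image_std
pixels = pixels.transpose(2, 0, 1)[None]                   # HWC -> NCHW, (1, 3, 224, 224)
```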
runs/May08_09-52-48_cb9d71a650ba/events.out.tfevents.1715161973.cb9d71a650ba.2946.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6cea54b6e5289c4562ed201fd3a69b5726da61b6f7bda4ee8031bb94405bd3de
+ size 88
runs/May08_09-56-27_cb9d71a650ba/events.out.tfevents.1715162188.cb9d71a650ba.10808.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c631483ef4846639ecc67d617fbebb97042957835cdb593328f2b277a2f06de3
+ size 4685
runs/May08_09-57-09_cb9d71a650ba/events.out.tfevents.1715162240.cb9d71a650ba.10808.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:f2a2a5b9d3b0e43d08fbfe8e682d3e69baa2d096d28111498bcb519a60d6a1d5
+ size 6134
runs/May08_10-55-55_bdf2d2ff2ea6/events.out.tfevents.1715165761.bdf2d2ff2ea6.3845.0 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:c793b1e749ea08199c4b35af69abae7e14559164dda6f9cc0cf4d6d07f561cec
+ size 13550
runs/May08_10-55-55_bdf2d2ff2ea6/events.out.tfevents.1715169529.bdf2d2ff2ea6.3845.1 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:1376141463191becbd9c23e751550ef21c43d8547ad59d0f926790bd1fc25d73
+ size 411
train_results.json ADDED
@@ -0,0 +1,8 @@
+ {
+     "epoch": 20.0,
+     "total_flos": 4.417082996594688e+17,
+     "train_loss": 0.08463575366784223,
+     "train_runtime": 382.9626,
+     "train_samples_per_second": 14.884,
+     "train_steps_per_second": 0.94
+ }
trainer_state.json ADDED
@@ -0,0 +1,309 @@
+ {
+   "best_metric": 0.43892163038253784,
+   "best_model_checkpoint": "./MRI_vit/checkpoint-100",
+   "epoch": 20.0,
+   "eval_steps": 100,
+   "global_step": 360,
+   "is_hyper_param_search": false,
+   "is_local_process_zero": true,
+   "is_world_process_zero": true,
+   "log_history": [
+     {
+       "epoch": 0.5555555555555556,
+       "grad_norm": 5.236236095428467,
+       "learning_rate": 0.00019444444444444446,
+       "loss": 1.0736,
+       "step": 10
+     },
+     {
+       "epoch": 1.1111111111111112,
+       "grad_norm": 3.906468629837036,
+       "learning_rate": 0.00018888888888888888,
+       "loss": 0.5346,
+       "step": 20
+     },
+     {
+       "epoch": 1.6666666666666665,
+       "grad_norm": 14.102361679077148,
+       "learning_rate": 0.00018333333333333334,
+       "loss": 0.4717,
+       "step": 30
+     },
+     {
+       "epoch": 2.2222222222222223,
+       "grad_norm": 7.264150142669678,
+       "learning_rate": 0.00017777777777777779,
+       "loss": 0.3995,
+       "step": 40
+     },
+     {
+       "epoch": 2.7777777777777777,
+       "grad_norm": 3.351327419281006,
+       "learning_rate": 0.00017222222222222224,
+       "loss": 0.1877,
+       "step": 50
+     },
+     {
+       "epoch": 3.3333333333333335,
+       "grad_norm": 7.443002223968506,
+       "learning_rate": 0.0001666666666666667,
+       "loss": 0.148,
+       "step": 60
+     },
+     {
+       "epoch": 3.888888888888889,
+       "grad_norm": 0.747875452041626,
+       "learning_rate": 0.0001611111111111111,
+       "loss": 0.0747,
+       "step": 70
+     },
+     {
+       "epoch": 4.444444444444445,
+       "grad_norm": 0.011842708103358746,
+       "learning_rate": 0.00015555555555555556,
+       "loss": 0.0513,
+       "step": 80
+     },
+     {
+       "epoch": 5.0,
+       "grad_norm": 2.2265398502349854,
+       "learning_rate": 0.00015000000000000001,
+       "loss": 0.0756,
+       "step": 90
+     },
+     {
+       "epoch": 5.555555555555555,
+       "grad_norm": 0.008857272565364838,
+       "learning_rate": 0.00014444444444444444,
+       "loss": 0.0101,
+       "step": 100
+     },
+     {
+       "epoch": 5.555555555555555,
+       "eval_accuracy": 0.9058823529411765,
+       "eval_loss": 0.43892163038253784,
+       "eval_runtime": 27.2539,
+       "eval_samples_per_second": 3.119,
+       "eval_steps_per_second": 0.404,
+       "step": 100
+     },
+     {
+       "epoch": 6.111111111111111,
+       "grad_norm": 0.0082765007391572,
+       "learning_rate": 0.0001388888888888889,
+       "loss": 0.0143,
+       "step": 110
+     },
+     {
+       "epoch": 6.666666666666667,
+       "grad_norm": 0.22718505561351776,
+       "learning_rate": 0.00013333333333333334,
+       "loss": 0.0033,
+       "step": 120
+     },
+     {
+       "epoch": 7.222222222222222,
+       "grad_norm": 0.004425971303135157,
+       "learning_rate": 0.00012777777777777776,
+       "loss": 0.0003,
+       "step": 130
+     },
+     {
+       "epoch": 7.777777777777778,
+       "grad_norm": 0.16841186583042145,
+       "learning_rate": 0.00012222222222222224,
+       "loss": 0.0004,
+       "step": 140
+     },
+     {
+       "epoch": 8.333333333333334,
+       "grad_norm": 0.0038613828364759684,
+       "learning_rate": 0.00011666666666666668,
+       "loss": 0.0003,
+       "step": 150
+     },
+     {
+       "epoch": 8.88888888888889,
+       "grad_norm": 0.002841161098331213,
+       "learning_rate": 0.00011111111111111112,
+       "loss": 0.0002,
+       "step": 160
+     },
+     {
+       "epoch": 9.444444444444445,
+       "grad_norm": 0.004673393908888102,
+       "learning_rate": 0.00010555555555555557,
+       "loss": 0.0001,
+       "step": 170
+     },
+     {
+       "epoch": 10.0,
+       "grad_norm": 0.002209365600720048,
+       "learning_rate": 0.0001,
+       "loss": 0.0001,
+       "step": 180
+     },
+     {
+       "epoch": 10.555555555555555,
+       "grad_norm": 0.0015867718029767275,
+       "learning_rate": 9.444444444444444e-05,
+       "loss": 0.0001,
+       "step": 190
+     },
+     {
+       "epoch": 11.11111111111111,
+       "grad_norm": 0.0017831118311733007,
+       "learning_rate": 8.888888888888889e-05,
+       "loss": 0.0001,
+       "step": 200
+     },
+     {
+       "epoch": 11.11111111111111,
+       "eval_accuracy": 0.8941176470588236,
+       "eval_loss": 0.6572446227073669,
+       "eval_runtime": 2.0264,
+       "eval_samples_per_second": 41.947,
+       "eval_steps_per_second": 5.428,
+       "step": 200
+     },
+     {
+       "epoch": 11.666666666666666,
+       "grad_norm": 0.0011566176544874907,
+       "learning_rate": 8.333333333333334e-05,
+       "loss": 0.0001,
+       "step": 210
+     },
+     {
+       "epoch": 12.222222222222221,
+       "grad_norm": 0.0012466717744246125,
+       "learning_rate": 7.777777777777778e-05,
+       "loss": 0.0001,
+       "step": 220
+     },
+     {
+       "epoch": 12.777777777777779,
+       "grad_norm": 0.001513423747383058,
+       "learning_rate": 7.222222222222222e-05,
+       "loss": 0.0001,
+       "step": 230
+     },
+     {
+       "epoch": 13.333333333333334,
+       "grad_norm": 0.002179619623348117,
+       "learning_rate": 6.666666666666667e-05,
+       "loss": 0.0001,
+       "step": 240
+     },
+     {
+       "epoch": 13.88888888888889,
+       "grad_norm": 0.0014120868872851133,
+       "learning_rate": 6.111111111111112e-05,
+       "loss": 0.0001,
+       "step": 250
+     },
+     {
+       "epoch": 14.444444444444445,
+       "grad_norm": 0.001132520497776568,
+       "learning_rate": 5.555555555555556e-05,
+       "loss": 0.0001,
+       "step": 260
+     },
+     {
+       "epoch": 15.0,
+       "grad_norm": 0.0012154107680544257,
+       "learning_rate": 5e-05,
+       "loss": 0.0001,
+       "step": 270
+     },
+     {
+       "epoch": 15.555555555555555,
+       "grad_norm": 0.002115760464221239,
+       "learning_rate": 4.4444444444444447e-05,
+       "loss": 0.0001,
+       "step": 280
+     },
+     {
+       "epoch": 16.11111111111111,
+       "grad_norm": 0.0015289848670363426,
+       "learning_rate": 3.888888888888889e-05,
+       "loss": 0.0001,
+       "step": 290
+     },
+     {
+       "epoch": 16.666666666666668,
+       "grad_norm": 0.0009787428425624967,
+       "learning_rate": 3.3333333333333335e-05,
+       "loss": 0.0001,
+       "step": 300
+     },
+     {
+       "epoch": 16.666666666666668,
+       "eval_accuracy": 0.9058823529411765,
+       "eval_loss": 0.6680212616920471,
+       "eval_runtime": 1.8187,
+       "eval_samples_per_second": 46.736,
+       "eval_steps_per_second": 6.048,
+       "step": 300
+     },
+     {
+       "epoch": 17.22222222222222,
+       "grad_norm": 0.001420836430042982,
+       "learning_rate": 2.777777777777778e-05,
+       "loss": 0.0001,
+       "step": 310
+     },
+     {
+       "epoch": 17.77777777777778,
+       "grad_norm": 0.0010666553862392902,
+       "learning_rate": 2.2222222222222223e-05,
+       "loss": 0.0001,
+       "step": 320
+     },
+     {
+       "epoch": 18.333333333333332,
+       "grad_norm": 0.0015172103885561228,
+       "learning_rate": 1.6666666666666667e-05,
+       "loss": 0.0001,
+       "step": 330
+     },
+     {
+       "epoch": 18.88888888888889,
+       "grad_norm": 0.0011879707453772426,
+       "learning_rate": 1.1111111111111112e-05,
+       "loss": 0.0001,
+       "step": 340
+     },
+     {
+       "epoch": 19.444444444444443,
+       "grad_norm": 0.0010126435663551092,
+       "learning_rate": 5.555555555555556e-06,
+       "loss": 0.0001,
+       "step": 350
+     },
+     {
+       "epoch": 20.0,
+       "grad_norm": 0.0008644484914839268,
+       "learning_rate": 0.0,
+       "loss": 0.0001,
+       "step": 360
+     },
+     {
+       "epoch": 20.0,
+       "step": 360,
+       "total_flos": 4.417082996594688e+17,
+       "train_loss": 0.08463575366784223,
+       "train_runtime": 382.9626,
+       "train_samples_per_second": 14.884,
+       "train_steps_per_second": 0.94
+     }
+   ],
+   "logging_steps": 10,
+   "max_steps": 360,
+   "num_input_tokens_seen": 0,
+   "num_train_epochs": 20,
+   "save_steps": 100,
+   "total_flos": 4.417082996594688e+17,
+   "train_batch_size": 16,
+   "trial_name": null,
+   "trial_params": null
+ }
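Together with the hyperparameters listed in the README, this state pins down most of the run configuration. Below is a sketch of the corresponding `TrainingArguments`; `output_dir` follows `best_model_checkpoint`, and `load_best_model_at_end` is inferred from the tracked `best_metric`, so treat both as assumptions rather than the author's verbatim script.

```python
# A sketch of TrainingArguments consistent with the recorded run
# (lr 2e-4, batch sizes 16/8, 20 epochs, eval/save every 100 steps,
# logging every 10); not the author's actual training script.
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="./MRI_vit",       # matches best_model_checkpoint
    learning_rate=2e-4,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=8,
    num_train_epochs=20,
    seed=42,
    lr_scheduler_type="linear",
    evaluation_strategy="steps",
    eval_steps=100,
    save_steps=100,
    logging_steps=10,
    load_best_model_at_end=True,  # assumption, consistent with best_metric
)
```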
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:457a4f6a155fba095bde6d77ebdfb0d9648139ed63be3da9aca8aeb714c1c11b
+ size 4984