kamran7006 committed on
Commit b863e4d
1 Parent(s): ad69475

Upload . with huggingface_hub

checkpoint-150/config.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "no",
+ "1": "yes"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "no": 0,
+ "yes": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.28.1"
+ }
checkpoint-150/optimizer.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:2cdd456135361a2664b9da2db5d33c5bf4f520da9e81f3e1252a8c4a9250a069
+ size 686518725
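
The `.pt`, `.bin`, and `.pth` entries in this commit are Git LFS pointer files: the diff records only the LFS spec version, the SHA-256 of the real blob, and its byte size, while the payload itself (here a ~686 MB optimizer state) lives in LFS storage. A minimal sketch of fetching the underlying file with `huggingface_hub` — the `repo_id` below is a placeholder assumption, not something stated in this commit:

```python
# Minimal sketch: resolve a Git LFS pointer to the real file via the Hub API.
# NOTE: repo_id is a placeholder assumption; replace it with the actual repository.
from huggingface_hub import hf_hub_download

local_path = hf_hub_download(
    repo_id="kamran7006/brain_tumor_detection",   # assumed repo id
    filename="checkpoint-150/optimizer.pt",
)
print(local_path)  # local cache path of the ~686 MB optimizer state
```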
checkpoint-150/preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
checkpoint-150/pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9c92252f8ad98f84d857949b71372b836d5bd551491ea1dda8240bab653a2d7
+ size 343268717
checkpoint-150/rng_state.pth ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:297940ead5ab28c9ca795f1f27b80aa2505f6e6f878a5cecc9789065914dadb8
+ size 14575
checkpoint-150/scheduler.pt ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:6c8dcdc750e17c0e34e7ba218a04181c319f9b4bc8765e711a34bfe504c74f3d
+ size 627
checkpoint-150/trainer_state.json ADDED
@@ -0,0 +1,241 @@
+ {
+ "best_metric": 0.233134925365448,
+ "best_model_checkpoint": "brain_tumor_detection/checkpoint-150",
+ "epoch": 25.0,
+ "global_step": 150,
+ "is_hyper_param_search": false,
+ "is_local_process_zero": true,
+ "is_world_process_zero": true,
+ "log_history": [
+ {
+ "epoch": 1.0,
+ "eval_accuracy": 0.4342105263157895,
+ "eval_loss": 0.6988278031349182,
+ "eval_runtime": 0.9935,
+ "eval_samples_per_second": 76.501,
+ "eval_steps_per_second": 19.125,
+ "step": 6
+ },
+ {
+ "epoch": 2.0,
+ "eval_accuracy": 0.5526315789473685,
+ "eval_loss": 0.6916604042053223,
+ "eval_runtime": 0.956,
+ "eval_samples_per_second": 79.501,
+ "eval_steps_per_second": 19.875,
+ "step": 12
+ },
+ {
+ "epoch": 3.0,
+ "eval_accuracy": 0.6578947368421053,
+ "eval_loss": 0.6795731782913208,
+ "eval_runtime": 0.9513,
+ "eval_samples_per_second": 79.888,
+ "eval_steps_per_second": 19.972,
+ "step": 18
+ },
+ {
+ "epoch": 4.0,
+ "eval_accuracy": 0.6447368421052632,
+ "eval_loss": 0.6657436490058899,
+ "eval_runtime": 0.9493,
+ "eval_samples_per_second": 80.062,
+ "eval_steps_per_second": 20.016,
+ "step": 24
+ },
+ {
+ "epoch": 5.0,
+ "eval_accuracy": 0.6842105263157895,
+ "eval_loss": 0.6453096270561218,
+ "eval_runtime": 0.9483,
+ "eval_samples_per_second": 80.144,
+ "eval_steps_per_second": 20.036,
+ "step": 30
+ },
+ {
+ "epoch": 6.0,
+ "eval_accuracy": 0.6710526315789473,
+ "eval_loss": 0.617803692817688,
+ "eval_runtime": 0.9738,
+ "eval_samples_per_second": 78.043,
+ "eval_steps_per_second": 19.511,
+ "step": 36
+ },
+ {
+ "epoch": 7.0,
+ "eval_accuracy": 0.7105263157894737,
+ "eval_loss": 0.5901638865470886,
+ "eval_runtime": 0.9528,
+ "eval_samples_per_second": 79.766,
+ "eval_steps_per_second": 19.941,
+ "step": 42
+ },
+ {
+ "epoch": 8.0,
+ "eval_accuracy": 0.7763157894736842,
+ "eval_loss": 0.5550716519355774,
+ "eval_runtime": 0.9514,
+ "eval_samples_per_second": 79.886,
+ "eval_steps_per_second": 19.972,
+ "step": 48
+ },
+ {
+ "epoch": 9.0,
+ "eval_accuracy": 0.8552631578947368,
+ "eval_loss": 0.5136155486106873,
+ "eval_runtime": 0.9467,
+ "eval_samples_per_second": 80.28,
+ "eval_steps_per_second": 20.07,
+ "step": 54
+ },
+ {
+ "epoch": 10.0,
+ "eval_accuracy": 0.8947368421052632,
+ "eval_loss": 0.47127261757850647,
+ "eval_runtime": 0.978,
+ "eval_samples_per_second": 77.706,
+ "eval_steps_per_second": 19.427,
+ "step": 60
+ },
+ {
+ "epoch": 11.0,
+ "eval_accuracy": 0.8947368421052632,
+ "eval_loss": 0.4335399866104126,
+ "eval_runtime": 0.9791,
+ "eval_samples_per_second": 77.618,
+ "eval_steps_per_second": 19.405,
+ "step": 66
+ },
+ {
+ "epoch": 12.0,
+ "eval_accuracy": 0.9078947368421053,
+ "eval_loss": 0.4003973603248596,
+ "eval_runtime": 0.9657,
+ "eval_samples_per_second": 78.702,
+ "eval_steps_per_second": 19.675,
+ "step": 72
+ },
+ {
+ "epoch": 13.0,
+ "eval_accuracy": 0.9210526315789473,
+ "eval_loss": 0.3692227303981781,
+ "eval_runtime": 0.9471,
+ "eval_samples_per_second": 80.243,
+ "eval_steps_per_second": 20.061,
+ "step": 78
+ },
+ {
+ "epoch": 14.0,
+ "eval_accuracy": 0.9078947368421053,
+ "eval_loss": 0.34214863181114197,
+ "eval_runtime": 0.9618,
+ "eval_samples_per_second": 79.018,
+ "eval_steps_per_second": 19.754,
+ "step": 84
+ },
+ {
+ "epoch": 15.0,
+ "eval_accuracy": 0.9210526315789473,
+ "eval_loss": 0.3197132349014282,
+ "eval_runtime": 1.0155,
+ "eval_samples_per_second": 74.839,
+ "eval_steps_per_second": 18.71,
+ "step": 90
+ },
+ {
+ "epoch": 16.0,
+ "eval_accuracy": 0.9210526315789473,
+ "eval_loss": 0.2996158003807068,
+ "eval_runtime": 0.9757,
+ "eval_samples_per_second": 77.891,
+ "eval_steps_per_second": 19.473,
+ "step": 96
+ },
+ {
+ "epoch": 17.0,
+ "eval_accuracy": 0.8947368421052632,
+ "eval_loss": 0.28496354818344116,
+ "eval_runtime": 0.9735,
+ "eval_samples_per_second": 78.068,
+ "eval_steps_per_second": 19.517,
+ "step": 102
+ },
+ {
+ "epoch": 18.0,
+ "eval_accuracy": 0.9210526315789473,
+ "eval_loss": 0.2703515589237213,
+ "eval_runtime": 1.0086,
+ "eval_samples_per_second": 75.349,
+ "eval_steps_per_second": 18.837,
+ "step": 108
+ },
+ {
+ "epoch": 19.0,
+ "eval_accuracy": 0.9210526315789473,
+ "eval_loss": 0.25838443636894226,
+ "eval_runtime": 0.9799,
+ "eval_samples_per_second": 77.562,
+ "eval_steps_per_second": 19.391,
+ "step": 114
+ },
+ {
+ "epoch": 20.0,
+ "eval_accuracy": 0.9210526315789473,
+ "eval_loss": 0.24787285923957825,
+ "eval_runtime": 0.9801,
+ "eval_samples_per_second": 77.542,
+ "eval_steps_per_second": 19.385,
+ "step": 120
+ },
+ {
+ "epoch": 21.0,
+ "eval_accuracy": 0.9342105263157895,
+ "eval_loss": 0.24149483442306519,
+ "eval_runtime": 0.9873,
+ "eval_samples_per_second": 76.976,
+ "eval_steps_per_second": 19.244,
+ "step": 126
+ },
+ {
+ "epoch": 22.0,
+ "eval_accuracy": 0.9342105263157895,
+ "eval_loss": 0.23940841853618622,
+ "eval_runtime": 0.9607,
+ "eval_samples_per_second": 79.112,
+ "eval_steps_per_second": 19.778,
+ "step": 132
+ },
+ {
+ "epoch": 23.0,
+ "eval_accuracy": 0.8947368421052632,
+ "eval_loss": 0.2379884272813797,
+ "eval_runtime": 0.9958,
+ "eval_samples_per_second": 76.321,
+ "eval_steps_per_second": 19.08,
+ "step": 138
+ },
+ {
+ "epoch": 24.0,
+ "eval_accuracy": 0.9078947368421053,
+ "eval_loss": 0.2344084084033966,
+ "eval_runtime": 0.956,
+ "eval_samples_per_second": 79.494,
+ "eval_steps_per_second": 19.873,
+ "step": 144
+ },
+ {
+ "epoch": 25.0,
+ "eval_accuracy": 0.9342105263157895,
+ "eval_loss": 0.233134925365448,
+ "eval_runtime": 0.9514,
+ "eval_samples_per_second": 79.879,
+ "eval_steps_per_second": 19.97,
+ "step": 150
+ }
+ ],
+ "max_steps": 150,
+ "num_train_epochs": 25,
+ "total_flos": 3.429020540445696e+17,
+ "trial_name": null,
+ "trial_params": null
+ }
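
`trainer_state.json` logs one evaluation per epoch (6 optimizer steps each, 150 in total) and records the best checkpoint by `eval_loss`. A small sketch, assuming the file is read from the uploaded checkpoint directory, for pulling the accuracy/loss curve out of `log_history`:

```python
# Minimal sketch: inspect the per-epoch eval metrics stored in trainer_state.json.
import json

with open("checkpoint-150/trainer_state.json") as f:
    state = json.load(f)

print("best eval_loss:", state["best_metric"])             # 0.2331... at step 150
print("best checkpoint:", state["best_model_checkpoint"])

for entry in state["log_history"]:
    if "eval_accuracy" in entry:
        print(f"epoch {entry['epoch']:>4.0f}: "
              f"accuracy={entry['eval_accuracy']:.3f}  loss={entry['eval_loss']:.3f}")
```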
checkpoint-150/training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24af2245e01104d2a86c1ab9f5f0ac0040b31dd1944679f40c0b476bebaea8e3
+ size 3579
config.json ADDED
@@ -0,0 +1,32 @@
+ {
+ "_name_or_path": "google/vit-base-patch16-224-in21k",
+ "architectures": [
+ "ViTForImageClassification"
+ ],
+ "attention_probs_dropout_prob": 0.0,
+ "encoder_stride": 16,
+ "hidden_act": "gelu",
+ "hidden_dropout_prob": 0.0,
+ "hidden_size": 768,
+ "id2label": {
+ "0": "no",
+ "1": "yes"
+ },
+ "image_size": 224,
+ "initializer_range": 0.02,
+ "intermediate_size": 3072,
+ "label2id": {
+ "no": 0,
+ "yes": 1
+ },
+ "layer_norm_eps": 1e-12,
+ "model_type": "vit",
+ "num_attention_heads": 12,
+ "num_channels": 3,
+ "num_hidden_layers": 12,
+ "patch_size": 16,
+ "problem_type": "single_label_classification",
+ "qkv_bias": true,
+ "torch_dtype": "float32",
+ "transformers_version": "4.28.1"
+ }
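
This top-level `config.json` is identical to the checkpoint copy: `google/vit-base-patch16-224-in21k` fine-tuned with a `ViTForImageClassification` head over the two labels `no`/`yes`. A hedged sketch of loading the fine-tuned model from a local clone of this repository (the path `"."` is an assumption about where the files live):

```python
# Minimal sketch: load the fine-tuned classifier from a local clone of this repo.
# Assumes config.json and pytorch_model.bin sit in the current directory.
from transformers import ViTForImageClassification

model = ViTForImageClassification.from_pretrained(".")
print(model.config.id2label)    # {0: 'no', 1: 'yes'}
print(model.config.num_labels)  # 2
```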
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+ "do_normalize": true,
+ "do_rescale": true,
+ "do_resize": true,
+ "image_mean": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "image_processor_type": "ViTImageProcessor",
+ "image_std": [
+ 0.5,
+ 0.5,
+ 0.5
+ ],
+ "resample": 2,
+ "rescale_factor": 0.00392156862745098,
+ "size": {
+ "height": 224,
+ "width": 224
+ }
+ }
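
`preprocessor_config.json` describes the standard ViT input pipeline: resize to 224×224 with bilinear resampling (`"resample": 2`), rescale pixel values by 1/255, then normalize each channel with mean 0.5 and std 0.5. A sketch of running a single image through the processor and model — the image path is a placeholder:

```python
# Minimal sketch: preprocess one image and run a single yes/no prediction.
# NOTE: "scan.jpg" is a placeholder path; any RGB-convertible image works.
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained(".")   # reads preprocessor_config.json
model = ViTForImageClassification.from_pretrained(".")
model.eval()

image = Image.open("scan.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")  # pixel_values: [1, 3, 224, 224]

with torch.no_grad():
    logits = model(**inputs).logits

print(model.config.id2label[logits.argmax(-1).item()])  # "yes" or "no"
```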
pytorch_model.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:b9c92252f8ad98f84d857949b71372b836d5bd551491ea1dda8240bab653a2d7
+ size 343268717
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:24af2245e01104d2a86c1ab9f5f0ac0040b31dd1944679f40c0b476bebaea8e3
+ size 3579
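
Unlike the JSON configs, `training_args.bin` is a pickled `TrainingArguments` object, so it has to be unpickled rather than parsed. A hedged sketch, assuming the file sits at the repository root; recent PyTorch versions may additionally need `weights_only=False` to unpickle non-tensor objects:

```python
# Minimal sketch: unpickle the TrainingArguments saved by the Trainer.
# Requires transformers to be importable, since the pickle references its classes.
import torch

args = torch.load("training_args.bin")  # newer torch: torch.load(..., weights_only=False)
print(type(args).__name__)              # TrainingArguments
print(args.num_train_epochs)            # 25, consistent with trainer_state.json
```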