End of training
README.md
ADDED
@@ -0,0 +1,127 @@
---
license: apache-2.0
base_model: google/vit-base-patch16-224-in21k
tags:
- generated_from_trainer
datasets:
- imagefolder
metrics:
- accuracy
model-index:
- name: Chess_Images
  results:
  - task:
      name: Image Classification
      type: image-classification
    dataset:
      name: imagefolder
      type: imagefolder
      config: default
      split: train
      args: default
    metrics:
    - name: Accuracy
      type: accuracy
      value: 0.9333333333333333
---

<!-- This model card has been generated automatically according to the information the Trainer had access to. You
should probably proofread and complete it, then remove this comment. -->

# Chess_Images

This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on the imagefolder dataset.
It achieves the following results on the evaluation set:
- Loss: 0.2460
- Accuracy: 0.9333

## Model description

More information needed

## Intended uses & limitations

More information needed

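As a minimal usage sketch (the repository id `Chess_Images` and the image path are assumptions; substitute the actual Hub path or a local checkpoint directory), inference can be run with the `image-classification` pipeline:

```python
from transformers import pipeline

# Assumed model location; replace with the real Hub id or a local directory.
classifier = pipeline("image-classification", model="Chess_Images")

# Returns the six piece labels ranked by score, e.g. "White queen".
predictions = classifier("piece.jpg")
print(predictions[0]["label"], predictions[0]["score"])
```
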
## Training and evaluation data

More information needed

## Training procedure

### Training hyperparameters

The following hyperparameters were used during training (a sketch mapping them onto `TrainingArguments` follows the list):
- learning_rate: 5e-05
- train_batch_size: 16
- eval_batch_size: 16
- seed: 42
- gradient_accumulation_steps: 4
- total_train_batch_size: 64
- optimizer: Adam with betas=(0.9,0.999) and epsilon=1e-08
- lr_scheduler_type: linear
- lr_scheduler_warmup_ratio: 0.1
- num_epochs: 50

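A minimal sketch of these settings as `transformers.TrainingArguments` (`output_dir` is an assumption; the Adam betas and epsilon listed above are the library defaults, so they need no explicit arguments):

```python
from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="Chess_Images",          # assumed output directory
    learning_rate=5e-5,
    per_device_train_batch_size=16,
    per_device_eval_batch_size=16,
    seed=42,
    gradient_accumulation_steps=4,      # 16 x 4 = 64 effective train batch size
    lr_scheduler_type="linear",
    warmup_ratio=0.1,
    num_train_epochs=50,
)
```
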
### Training results

| Training Loss | Epoch | Step | Validation Loss | Accuracy |
|:-------------:|:-----:|:----:|:---------------:|:--------:|
| No log | 1.0 | 2 | 0.3365 | 0.9333 |
| No log | 2.0 | 4 | 0.3018 | 0.9333 |
| No log | 3.0 | 6 | 0.3443 | 0.9667 |
| No log | 4.0 | 8 | 0.2189 | 1.0 |
| 0.213 | 5.0 | 10 | 0.3188 | 0.9667 |
| 0.213 | 6.0 | 12 | 0.2903 | 0.9333 |
| 0.213 | 7.0 | 14 | 0.3398 | 0.9 |
| 0.213 | 8.0 | 16 | 0.3879 | 0.8667 |
| 0.213 | 9.0 | 18 | 0.3023 | 0.9333 |
| 0.2116 | 10.0 | 20 | 0.1857 | 1.0 |
| 0.2116 | 11.0 | 22 | 0.2737 | 0.9667 |
| 0.2116 | 12.0 | 24 | 0.2675 | 1.0 |
| 0.2116 | 13.0 | 26 | 0.2817 | 0.9333 |
| 0.2116 | 14.0 | 28 | 0.4394 | 0.8667 |
| 0.1837 | 15.0 | 30 | 0.3167 | 0.9 |
| 0.1837 | 16.0 | 32 | 0.2795 | 0.9333 |
| 0.1837 | 17.0 | 34 | 0.2315 | 0.9333 |
| 0.1837 | 18.0 | 36 | 0.2266 | 0.9667 |
| 0.1837 | 19.0 | 38 | 0.3199 | 0.9333 |
| 0.1726 | 20.0 | 40 | 0.2553 | 0.9667 |
| 0.1726 | 21.0 | 42 | 0.3804 | 0.9 |
| 0.1726 | 22.0 | 44 | 0.2118 | 0.9667 |
| 0.1726 | 23.0 | 46 | 0.1784 | 1.0 |
| 0.1726 | 24.0 | 48 | 0.2098 | 0.9667 |
| 0.1529 | 25.0 | 50 | 0.1676 | 1.0 |
| 0.1529 | 26.0 | 52 | 0.2980 | 0.9 |
| 0.1529 | 27.0 | 54 | 0.2726 | 0.9667 |
| 0.1529 | 28.0 | 56 | 0.1756 | 1.0 |
| 0.1529 | 29.0 | 58 | 0.2266 | 0.9667 |
| 0.1335 | 30.0 | 60 | 0.3161 | 0.9333 |
| 0.1335 | 31.0 | 62 | 0.2872 | 0.9333 |
| 0.1335 | 32.0 | 64 | 0.2030 | 1.0 |
| 0.1335 | 33.0 | 66 | 0.2297 | 0.9333 |
| 0.1335 | 34.0 | 68 | 0.2876 | 0.9333 |
| 0.1228 | 35.0 | 70 | 0.1432 | 1.0 |
| 0.1228 | 36.0 | 72 | 0.2194 | 0.9667 |
| 0.1228 | 37.0 | 74 | 0.1387 | 1.0 |
| 0.1228 | 38.0 | 76 | 0.1381 | 1.0 |
| 0.1228 | 39.0 | 78 | 0.1540 | 1.0 |
| 0.1324 | 40.0 | 80 | 0.3075 | 0.8667 |
| 0.1324 | 41.0 | 82 | 0.1892 | 1.0 |
| 0.1324 | 42.0 | 84 | 0.1487 | 1.0 |
| 0.1324 | 43.0 | 86 | 0.1515 | 1.0 |
| 0.1324 | 44.0 | 88 | 0.2617 | 0.9333 |
| 0.136 | 45.0 | 90 | 0.1719 | 0.9667 |
| 0.136 | 46.0 | 92 | 0.2501 | 0.9 |
| 0.136 | 47.0 | 94 | 0.1618 | 1.0 |
| 0.136 | 48.0 | 96 | 0.2175 | 0.9667 |
| 0.136 | 49.0 | 98 | 0.2039 | 0.9667 |
| 0.1226 | 50.0 | 100 | 0.2460 | 0.9333 |

### Framework versions

- Transformers 4.38.2
- Pytorch 2.2.1+cu121
- Datasets 2.18.0
- Tokenizers 0.15.2
config.json
ADDED
@@ -0,0 +1,40 @@
{
  "_name_or_path": "google/vit-base-patch16-224-in21k",
  "architectures": [
    "ViTForImageClassification"
  ],
  "attention_probs_dropout_prob": 0.0,
  "encoder_stride": 16,
  "hidden_act": "gelu",
  "hidden_dropout_prob": 0.0,
  "hidden_size": 768,
  "id2label": {
    "0": "Black king",
    "1": "Black knight",
    "2": "Black queen",
    "3": "White king",
    "4": "White knight",
    "5": "White queen"
  },
  "image_size": 224,
  "initializer_range": 0.02,
  "intermediate_size": 3072,
  "label2id": {
    "Black king": "0",
    "Black knight": "1",
    "Black queen": "2",
    "White king": "3",
    "White knight": "4",
    "White queen": "5"
  },
  "layer_norm_eps": 1e-12,
  "model_type": "vit",
  "num_attention_heads": 12,
  "num_channels": 3,
  "num_hidden_layers": 12,
  "patch_size": 16,
  "problem_type": "single_label_classification",
  "qkv_bias": true,
  "torch_dtype": "float32",
  "transformers_version": "4.38.2"
}
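The `id2label` table above is what converts the model's six output logits into piece names. A minimal sketch of that mapping at inference time (the local checkpoint path and image file are assumptions):

```python
import torch
from PIL import Image
from transformers import ViTForImageClassification, ViTImageProcessor

# Assumed local directory holding the files added in this commit.
model = ViTForImageClassification.from_pretrained("./Chess_Images")
processor = ViTImageProcessor.from_pretrained("./Chess_Images")

image = Image.open("piece.jpg").convert("RGB")
inputs = processor(images=image, return_tensors="pt")

with torch.no_grad():
    logits = model(**inputs).logits         # shape (1, 6)

predicted_id = logits.argmax(-1).item()
print(model.config.id2label[predicted_id])  # e.g. "White queen"
```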
model.safetensors
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:7f58cf493cefe012657bcf2686ed12b1811b11e357648b1b6cee688c28c15b07
size 343236280
preprocessor_config.json
ADDED
@@ -0,0 +1,22 @@
{
  "do_normalize": true,
  "do_rescale": true,
  "do_resize": true,
  "image_mean": [
    0.5,
    0.5,
    0.5
  ],
  "image_processor_type": "ViTImageProcessor",
  "image_std": [
    0.5,
    0.5,
    0.5
  ],
  "resample": 2,
  "rescale_factor": 0.00392156862745098,
  "size": {
    "height": 224,
    "width": 224
  }
}
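These settings resize inputs to 224x224 with bilinear resampling (`resample: 2` is PIL's `Image.BILINEAR`), rescale 8-bit pixel values by 1/255, and normalize every channel with mean and std 0.5, mapping pixels into [-1, 1]. A minimal NumPy sketch of the equivalent transform (the function name and file path are illustrative):

```python
import numpy as np
from PIL import Image

def preprocess(path: str) -> np.ndarray:
    # Resize to 224x224 with bilinear resampling (resample code 2).
    image = Image.open(path).convert("RGB").resize((224, 224), resample=2)
    pixels = np.asarray(image, dtype=np.float32)
    pixels = pixels * (1.0 / 255.0)    # rescale_factor = 0.00392156862745098
    pixels = (pixels - 0.5) / 0.5      # image_mean = image_std = 0.5 -> [-1, 1]
    return pixels.transpose(2, 0, 1)   # HWC -> CHW, the layout ViT expects

# batch = preprocess("piece.jpg")[None]  # (1, 3, 224, 224)
```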
runs/Apr02_15-08-32_612341cc7ac0/events.out.tfevents.1712070513.612341cc7ac0.413.0
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:e7cb206780c22166a88d60d8189391c6273df973576cc9fa2e8fa97c645a4110
size 22159
runs/Apr02_15-17-28_612341cc7ac0/events.out.tfevents.1712071055.612341cc7ac0.413.1
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:11357d0dc06784deaed4f81d263b1701b2600e3b01b23fac4eac9e0e723704ad
size 23124
training_args.bin
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:551a2b710583b4509721cfd8b0a15f284452eb5daebb91ebae234199ab8a77ce
size 4920