maschenny1 committed
Commit 827b54e
1 Parent(s): cbd7e9e

End of training

Files changed (4)
  1. README.md +59 -0
  2. config.json +230 -0
  3. preprocessor_config.json +22 -0
  4. tf_model.h5 +3 -0
README.md ADDED
@@ -0,0 +1,59 @@
+ ---
+ license: apache-2.0
+ base_model: google/vit-base-patch16-224-in21k
+ tags:
+ - generated_from_keras_callback
+ model-index:
+ - name: maschenny1/food_classifier
+   results: []
+ ---
+
+ <!-- This model card has been generated automatically according to the information Keras had access to. You should
+ probably proofread and complete it, then remove this comment. -->
+
+ # maschenny1/food_classifier
+
+ This model is a fine-tuned version of [google/vit-base-patch16-224-in21k](https://huggingface.co/google/vit-base-patch16-224-in21k) on an unknown dataset.
+ It achieves the following results at the final training epoch:
+ - Train Loss: 4.2052
+ - Validation Loss: 4.0860
+ - Train Accuracy: 1.0
+ - Epoch: 4
+
+ ## Model description
+
+ More information needed
+
+ ## Intended uses & limitations
+
+ More information needed
+
+ ## Training and evaluation data
+
+ More information needed
+
+ ## Training procedure
+
+ ### Training hyperparameters
+
+ The following hyperparameters were used during training:
+ - optimizer: {'name': 'AdamWeightDecay', 'learning_rate': {'module': 'keras.optimizers.schedules', 'class_name': 'PolynomialDecay', 'config': {'initial_learning_rate': 3e-05, 'decay_steps': 60, 'end_learning_rate': 0.0, 'power': 1.0, 'cycle': False, 'name': None}, 'registered_name': None}, 'decay': 0.0, 'beta_1': 0.9, 'beta_2': 0.999, 'epsilon': 1e-08, 'amsgrad': False, 'weight_decay_rate': 0.01}
+ - training_precision: float32
+
+ ### Training results
+
+ | Train Loss | Validation Loss | Train Accuracy | Epoch |
+ |:----------:|:---------------:|:--------------:|:-----:|
+ | 4.5187 | 4.3791 | 1.0 | 0 |
+ | 4.4396 | 4.3095 | 1.0 | 1 |
+ | 4.3901 | 4.2421 | 1.0 | 2 |
+ | 4.2868 | 4.1683 | 1.0 | 3 |
+ | 4.2052 | 4.0860 | 1.0 | 4 |
+
+ ### Framework versions
+
+ - Transformers 4.38.2
+ - TensorFlow 2.15.0
+ - Datasets 2.18.0
+ - Tokenizers 0.15.2
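The serialized optimizer above is Keras's AdamWeightDecay with a linear (`power: 1.0`) PolynomialDecay schedule from 3e-05 down to 0.0 over 60 steps, which suggests about 12 update steps per epoch across the 5 logged epochs. The training script itself is not part of this commit, so the following is only a minimal sketch of rebuilding that optimizer with the `create_optimizer` helper from `transformers`, not the author's code:

```python
from transformers import create_optimizer

# Rebuild the optimizer described in the card: AdamWeightDecay with a
# linear PolynomialDecay schedule from 3e-05 to 0.0 over 60 steps,
# weight_decay_rate=0.01, beta_1=0.9, beta_2=0.999, epsilon=1e-08.
optimizer, lr_schedule = create_optimizer(
    init_lr=3e-5,
    num_train_steps=60,
    num_warmup_steps=0,
    weight_decay_rate=0.01,
    adam_beta1=0.9,
    adam_beta2=0.999,
    adam_epsilon=1e-8,
    power=1.0,
)
# model.compile(optimizer=optimizer) would then reproduce the logged setup.
```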
config.json ADDED
@@ -0,0 +1,230 @@
+ {
+   "_name_or_path": "google/vit-base-patch16-224-in21k",
+   "architectures": [
+     "ViTForImageClassification"
+   ],
+   "attention_probs_dropout_prob": 0.0,
+   "encoder_stride": 16,
+   "hidden_act": "gelu",
+   "hidden_dropout_prob": 0.0,
+   "hidden_size": 768,
+   "id2label": {
+     "0": "1",
+     "1": "10",
+     "10": "16",
+     "100": "98",
+     "101": "99",
+     "11": "17",
+     "12": "18",
+     "13": "19",
+     "14": "2",
+     "15": "20",
+     "16": "21",
+     "17": "22",
+     "18": "23",
+     "19": "24",
+     "2": "100",
+     "20": "25",
+     "21": "26",
+     "22": "27",
+     "23": "28",
+     "24": "29",
+     "25": "3",
+     "26": "30",
+     "27": "31",
+     "28": "32",
+     "29": "33",
+     "3": "101",
+     "30": "34",
+     "31": "35",
+     "32": "36",
+     "33": "37",
+     "34": "38",
+     "35": "39",
+     "36": "4",
+     "37": "40",
+     "38": "41",
+     "39": "42",
+     "4": "102",
+     "40": "43",
+     "41": "44",
+     "42": "45",
+     "43": "46",
+     "44": "47",
+     "45": "48",
+     "46": "49",
+     "47": "5",
+     "48": "50",
+     "49": "51",
+     "5": "11",
+     "50": "52",
+     "51": "53",
+     "52": "54",
+     "53": "55",
+     "54": "56",
+     "55": "57",
+     "56": "58",
+     "57": "59",
+     "58": "6",
+     "59": "60",
+     "6": "12",
+     "60": "61",
+     "61": "62",
+     "62": "63",
+     "63": "64",
+     "64": "65",
+     "65": "66",
+     "66": "67",
+     "67": "68",
+     "68": "69",
+     "69": "7",
+     "7": "13",
+     "70": "70",
+     "71": "71",
+     "72": "72",
+     "73": "73",
+     "74": "74",
+     "75": "75",
+     "76": "76",
+     "77": "77",
+     "78": "78",
+     "79": "79",
+     "8": "14",
+     "80": "8",
+     "81": "80",
+     "82": "81",
+     "83": "82",
+     "84": "83",
+     "85": "84",
+     "86": "85",
+     "87": "86",
+     "88": "87",
+     "89": "88",
+     "9": "15",
+     "90": "89",
+     "91": "9",
+     "92": "90",
+     "93": "91",
+     "94": "92",
+     "95": "93",
+     "96": "94",
+     "97": "95",
+     "98": "96",
+     "99": "97"
+   },
+   "image_size": 224,
+   "initializer_range": 0.02,
+   "intermediate_size": 3072,
+   "label2id": {
+     "1": "0",
+     "10": "1",
+     "100": "2",
+     "101": "3",
+     "102": "4",
+     "11": "5",
+     "12": "6",
+     "13": "7",
+     "14": "8",
+     "15": "9",
+     "16": "10",
+     "17": "11",
+     "18": "12",
+     "19": "13",
+     "2": "14",
+     "20": "15",
+     "21": "16",
+     "22": "17",
+     "23": "18",
+     "24": "19",
+     "25": "20",
+     "26": "21",
+     "27": "22",
+     "28": "23",
+     "29": "24",
+     "3": "25",
+     "30": "26",
+     "31": "27",
+     "32": "28",
+     "33": "29",
+     "34": "30",
+     "35": "31",
+     "36": "32",
+     "37": "33",
+     "38": "34",
+     "39": "35",
+     "4": "36",
+     "40": "37",
+     "41": "38",
+     "42": "39",
+     "43": "40",
+     "44": "41",
+     "45": "42",
+     "46": "43",
+     "47": "44",
+     "48": "45",
+     "49": "46",
+     "5": "47",
+     "50": "48",
+     "51": "49",
+     "52": "50",
+     "53": "51",
+     "54": "52",
+     "55": "53",
+     "56": "54",
+     "57": "55",
+     "58": "56",
+     "59": "57",
+     "6": "58",
+     "60": "59",
+     "61": "60",
+     "62": "61",
+     "63": "62",
+     "64": "63",
+     "65": "64",
+     "66": "65",
+     "67": "66",
+     "68": "67",
+     "69": "68",
+     "7": "69",
+     "70": "70",
+     "71": "71",
+     "72": "72",
+     "73": "73",
+     "74": "74",
+     "75": "75",
+     "76": "76",
+     "77": "77",
+     "78": "78",
+     "79": "79",
+     "8": "80",
+     "80": "81",
+     "81": "82",
+     "82": "83",
+     "83": "84",
+     "84": "85",
+     "85": "86",
+     "86": "87",
+     "87": "88",
+     "88": "89",
+     "89": "90",
+     "9": "91",
+     "90": "92",
+     "91": "93",
+     "92": "94",
+     "93": "95",
+     "94": "96",
+     "95": "97",
+     "96": "98",
+     "97": "99",
+     "98": "100",
+     "99": "101"
+   },
+   "layer_norm_eps": 1e-12,
+   "model_type": "vit",
+   "num_attention_heads": 12,
+   "num_channels": 3,
+   "num_hidden_layers": 12,
+   "patch_size": 16,
+   "qkv_bias": true,
+   "transformers_version": "4.38.2"
+ }
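This config describes a standard ViT-Base encoder (12 layers, 12 attention heads, hidden size 768, 16x16 patches on 224x224 inputs) with a 102-way classification head; note that `id2label` maps class indices to the numeric strings "1" through "102" rather than food names. A minimal sketch of loading it, assuming the repo is public on the Hub:

```python
from transformers import TFViTForImageClassification, ViTConfig

# Inspect the architecture and label mapping shipped in config.json.
config = ViTConfig.from_pretrained("maschenny1/food_classifier")
print(config.num_hidden_layers, config.num_attention_heads, config.hidden_size)  # 12 12 768
print(len(config.id2label))  # 102 classes, labeled "1" through "102"

# Instantiate the fine-tuned TF model; weights are loaded from tf_model.h5.
model = TFViTForImageClassification.from_pretrained("maschenny1/food_classifier")
```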
preprocessor_config.json ADDED
@@ -0,0 +1,22 @@
+ {
+   "do_normalize": true,
+   "do_rescale": true,
+   "do_resize": true,
+   "image_mean": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "image_processor_type": "ViTImageProcessor",
+   "image_std": [
+     0.5,
+     0.5,
+     0.5
+   ],
+   "resample": 2,
+   "rescale_factor": 0.00392156862745098,
+   "size": {
+     "height": 224,
+     "width": 224
+   }
+ }
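`ViTImageProcessor` applies exactly what this file declares: resize to 224x224 with bilinear resampling (`resample: 2`), rescale pixel values by 1/255 (`rescale_factor` = 0.00392156862745098), and normalize each channel with mean and std 0.5. A minimal end-to-end inference sketch; the input path `food.jpg` is a hypothetical placeholder:

```python
import tensorflow as tf
from PIL import Image
from transformers import TFViTForImageClassification, ViTImageProcessor

processor = ViTImageProcessor.from_pretrained("maschenny1/food_classifier")
model = TFViTForImageClassification.from_pretrained("maschenny1/food_classifier")

# Resize -> rescale -> normalize, as specified in preprocessor_config.json.
image = Image.open("food.jpg")  # hypothetical input image
inputs = processor(images=image, return_tensors="tf")

logits = model(**inputs).logits
pred_id = int(tf.argmax(logits, axis=-1)[0])
print(model.config.id2label[pred_id])  # a numeric label string such as "42"
```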
tf_model.h5 ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:361c2353368f2ea68a9c841118439400194c66014d993b38c5d5383f87639623
+ size 343776952
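tf_model.h5 is committed as a Git LFS pointer, so a plain `git clone` without LFS yields only this three-line stub; the actual 343,776,952-byte (~344 MB) weights file lives in LFS storage. A minimal sketch of fetching it with `huggingface_hub` instead of Git, again assuming the repo is public:

```python
from huggingface_hub import hf_hub_download

# Resolves the LFS pointer and caches the real ~344 MB weights file locally.
path = hf_hub_download(repo_id="maschenny1/food_classifier", filename="tf_model.h5")
print(path)
```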