adhityamw11 commited on
Commit
9c03527
1 Parent(s): f6d2e0f

Upload folder using huggingface_hub

Browse files
checkpoint-14616/config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/vit-base-patch16-224",
3
+ "architectures": [
4
+ "ViTForImageClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "encoder_stride": 16,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.0,
10
+ "hidden_size": 768,
11
+ "id2label": {
12
+ "0": "disgust",
13
+ "1": "anger",
14
+ "2": "fear",
15
+ "3": "happy",
16
+ "4": "surprise",
17
+ "5": "neutral",
18
+ "6": "sad"
19
+ },
20
+ "image_size": 224,
21
+ "initializer_range": 0.02,
22
+ "intermediate_size": 3072,
23
+ "label2id": {
24
+ "anger": 1,
25
+ "disgust": 0,
26
+ "fear": 2,
27
+ "happy": 3,
28
+ "neutral": 5,
29
+ "sad": 6,
30
+ "surprise": 4
31
+ },
32
+ "layer_norm_eps": 1e-12,
33
+ "model_type": "vit",
34
+ "num_attention_heads": 12,
35
+ "num_channels": 3,
36
+ "num_hidden_layers": 12,
37
+ "patch_size": 16,
38
+ "problem_type": "single_label_classification",
39
+ "qkv_bias": true,
40
+ "torch_dtype": "float32",
41
+ "transformers_version": "4.39.3"
42
+ }
checkpoint-14616/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:73c9bc11b6a6c8ad4b674efad682676b150e033e243aa817584f336564d11cb4
3
+ size 343239356
checkpoint-14616/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8eeed73e1d0ebfd568fbd8582d74e127a4523da017d9988ff7c2a659d5a75bfd
3
+ size 686599610
checkpoint-14616/preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_valid_processor_keys": [
3
+ "images",
4
+ "do_resize",
5
+ "size",
6
+ "resample",
7
+ "do_rescale",
8
+ "rescale_factor",
9
+ "do_normalize",
10
+ "image_mean",
11
+ "image_std",
12
+ "return_tensors",
13
+ "data_format",
14
+ "input_data_format"
15
+ ],
16
+ "do_normalize": true,
17
+ "do_rescale": true,
18
+ "do_resize": true,
19
+ "image_mean": [
20
+ 0.5,
21
+ 0.5,
22
+ 0.5
23
+ ],
24
+ "image_processor_type": "ViTImageProcessor",
25
+ "image_std": [
26
+ 0.5,
27
+ 0.5,
28
+ 0.5
29
+ ],
30
+ "resample": 2,
31
+ "rescale_factor": 0.00392156862745098,
32
+ "size": {
33
+ "height": 224,
34
+ "width": 224
35
+ }
36
+ }
checkpoint-14616/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:b7d6513867adac690b95294e66a85a01cd3c4cc2c44aed15e66414ecec3c165b
3
+ size 14244
checkpoint-14616/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4e02491a9810330626ef5f61900ae5a59e7685369be8fc8362fbbc9cc56877fb
3
+ size 1064
checkpoint-14616/trainer_state.json ADDED
@@ -0,0 +1,350 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.5935187339782715,
3
+ "best_model_checkpoint": "facial_emotions_image_detection_rafdb_google_vit/checkpoint-14616",
4
+ "epoch": 14.0,
5
+ "eval_steps": 500,
6
+ "global_step": 14616,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.48,
13
+ "grad_norm": 4.908149719238281,
14
+ "learning_rate": 2.9351896303408547e-06,
15
+ "loss": 1.6796,
16
+ "step": 500
17
+ },
18
+ {
19
+ "epoch": 0.96,
20
+ "grad_norm": 6.714761257171631,
21
+ "learning_rate": 2.8631781084973597e-06,
22
+ "loss": 1.1697,
23
+ "step": 1000
24
+ },
25
+ {
26
+ "epoch": 1.0,
27
+ "eval_accuracy": 0.6166883963494133,
28
+ "eval_loss": 1.1752560138702393,
29
+ "eval_runtime": 43.7506,
30
+ "eval_samples_per_second": 70.125,
31
+ "eval_steps_per_second": 4.389,
32
+ "step": 1044
33
+ },
34
+ {
35
+ "epoch": 1.44,
36
+ "grad_norm": 5.284860134124756,
37
+ "learning_rate": 2.7911665866538647e-06,
38
+ "loss": 0.9317,
39
+ "step": 1500
40
+ },
41
+ {
42
+ "epoch": 1.92,
43
+ "grad_norm": 6.737391471862793,
44
+ "learning_rate": 2.7191550648103698e-06,
45
+ "loss": 0.7959,
46
+ "step": 2000
47
+ },
48
+ {
49
+ "epoch": 2.0,
50
+ "eval_accuracy": 0.6988265971316818,
51
+ "eval_loss": 0.9186690449714661,
52
+ "eval_runtime": 27.1747,
53
+ "eval_samples_per_second": 112.899,
54
+ "eval_steps_per_second": 7.065,
55
+ "step": 2088
56
+ },
57
+ {
58
+ "epoch": 2.39,
59
+ "grad_norm": 7.392563819885254,
60
+ "learning_rate": 2.6471435429668748e-06,
61
+ "loss": 0.6865,
62
+ "step": 2500
63
+ },
64
+ {
65
+ "epoch": 2.87,
66
+ "grad_norm": 6.488108158111572,
67
+ "learning_rate": 2.57513202112338e-06,
68
+ "loss": 0.6157,
69
+ "step": 3000
70
+ },
71
+ {
72
+ "epoch": 3.0,
73
+ "eval_accuracy": 0.7314211212516297,
74
+ "eval_loss": 0.798936128616333,
75
+ "eval_runtime": 26.6756,
76
+ "eval_samples_per_second": 115.012,
77
+ "eval_steps_per_second": 7.198,
78
+ "step": 3132
79
+ },
80
+ {
81
+ "epoch": 3.35,
82
+ "grad_norm": 6.2023773193359375,
83
+ "learning_rate": 2.503120499279885e-06,
84
+ "loss": 0.5444,
85
+ "step": 3500
86
+ },
87
+ {
88
+ "epoch": 3.83,
89
+ "grad_norm": 7.885190010070801,
90
+ "learning_rate": 2.43110897743639e-06,
91
+ "loss": 0.4788,
92
+ "step": 4000
93
+ },
94
+ {
95
+ "epoch": 4.0,
96
+ "eval_accuracy": 0.7496740547588006,
97
+ "eval_loss": 0.7347991466522217,
98
+ "eval_runtime": 26.7532,
99
+ "eval_samples_per_second": 114.678,
100
+ "eval_steps_per_second": 7.177,
101
+ "step": 4176
102
+ },
103
+ {
104
+ "epoch": 4.31,
105
+ "grad_norm": 6.879584312438965,
106
+ "learning_rate": 2.359097455592895e-06,
107
+ "loss": 0.4213,
108
+ "step": 4500
109
+ },
110
+ {
111
+ "epoch": 4.79,
112
+ "grad_norm": 5.687563896179199,
113
+ "learning_rate": 2.2870859337494e-06,
114
+ "loss": 0.3859,
115
+ "step": 5000
116
+ },
117
+ {
118
+ "epoch": 5.0,
119
+ "eval_accuracy": 0.7601043024771839,
120
+ "eval_loss": 0.6939254999160767,
121
+ "eval_runtime": 34.3214,
122
+ "eval_samples_per_second": 89.39,
123
+ "eval_steps_per_second": 5.594,
124
+ "step": 5220
125
+ },
126
+ {
127
+ "epoch": 5.27,
128
+ "grad_norm": 8.583109855651855,
129
+ "learning_rate": 2.215074411905905e-06,
130
+ "loss": 0.3529,
131
+ "step": 5500
132
+ },
133
+ {
134
+ "epoch": 5.75,
135
+ "grad_norm": 8.925189971923828,
136
+ "learning_rate": 2.14306289006241e-06,
137
+ "loss": 0.319,
138
+ "step": 6000
139
+ },
140
+ {
141
+ "epoch": 6.0,
142
+ "eval_accuracy": 0.7728161668839635,
143
+ "eval_loss": 0.6545543074607849,
144
+ "eval_runtime": 26.3981,
145
+ "eval_samples_per_second": 116.22,
146
+ "eval_steps_per_second": 7.273,
147
+ "step": 6264
148
+ },
149
+ {
150
+ "epoch": 6.23,
151
+ "grad_norm": 6.763670921325684,
152
+ "learning_rate": 2.071051368218915e-06,
153
+ "loss": 0.2914,
154
+ "step": 6500
155
+ },
156
+ {
157
+ "epoch": 6.7,
158
+ "grad_norm": 5.260948657989502,
159
+ "learning_rate": 1.9990398463754204e-06,
160
+ "loss": 0.2635,
161
+ "step": 7000
162
+ },
163
+ {
164
+ "epoch": 7.0,
165
+ "eval_accuracy": 0.7692307692307693,
166
+ "eval_loss": 0.6396156549453735,
167
+ "eval_runtime": 29.4081,
168
+ "eval_samples_per_second": 104.325,
169
+ "eval_steps_per_second": 6.529,
170
+ "step": 7308
171
+ },
172
+ {
173
+ "epoch": 7.18,
174
+ "grad_norm": 5.159913539886475,
175
+ "learning_rate": 1.9270283245319254e-06,
176
+ "loss": 0.2463,
177
+ "step": 7500
178
+ },
179
+ {
180
+ "epoch": 7.66,
181
+ "grad_norm": 6.131282806396484,
182
+ "learning_rate": 1.8550168026884303e-06,
183
+ "loss": 0.2254,
184
+ "step": 8000
185
+ },
186
+ {
187
+ "epoch": 8.0,
188
+ "eval_accuracy": 0.7829204693611473,
189
+ "eval_loss": 0.6089679598808289,
190
+ "eval_runtime": 26.3772,
191
+ "eval_samples_per_second": 116.313,
192
+ "eval_steps_per_second": 7.279,
193
+ "step": 8352
194
+ },
195
+ {
196
+ "epoch": 8.14,
197
+ "grad_norm": 9.69652271270752,
198
+ "learning_rate": 1.7830052808449353e-06,
199
+ "loss": 0.206,
200
+ "step": 8500
201
+ },
202
+ {
203
+ "epoch": 8.62,
204
+ "grad_norm": 19.338401794433594,
205
+ "learning_rate": 1.71099375900144e-06,
206
+ "loss": 0.189,
207
+ "step": 9000
208
+ },
209
+ {
210
+ "epoch": 9.0,
211
+ "eval_accuracy": 0.7842242503259452,
212
+ "eval_loss": 0.6003961563110352,
213
+ "eval_runtime": 26.7103,
214
+ "eval_samples_per_second": 114.862,
215
+ "eval_steps_per_second": 7.188,
216
+ "step": 9396
217
+ },
218
+ {
219
+ "epoch": 9.1,
220
+ "grad_norm": 7.628934383392334,
221
+ "learning_rate": 1.6389822371579451e-06,
222
+ "loss": 0.1867,
223
+ "step": 9500
224
+ },
225
+ {
226
+ "epoch": 9.58,
227
+ "grad_norm": 7.943005084991455,
228
+ "learning_rate": 1.5669707153144506e-06,
229
+ "loss": 0.1675,
230
+ "step": 10000
231
+ },
232
+ {
233
+ "epoch": 10.0,
234
+ "eval_accuracy": 0.7832464146023468,
235
+ "eval_loss": 0.6076902747154236,
236
+ "eval_runtime": 28.2446,
237
+ "eval_samples_per_second": 108.623,
238
+ "eval_steps_per_second": 6.798,
239
+ "step": 10440
240
+ },
241
+ {
242
+ "epoch": 10.06,
243
+ "grad_norm": 5.741328239440918,
244
+ "learning_rate": 1.4949591934709554e-06,
245
+ "loss": 0.154,
246
+ "step": 10500
247
+ },
248
+ {
249
+ "epoch": 10.54,
250
+ "grad_norm": 12.926642417907715,
251
+ "learning_rate": 1.4229476716274604e-06,
252
+ "loss": 0.1515,
253
+ "step": 11000
254
+ },
255
+ {
256
+ "epoch": 11.0,
257
+ "eval_accuracy": 0.7845501955671447,
258
+ "eval_loss": 0.5965496897697449,
259
+ "eval_runtime": 26.4509,
260
+ "eval_samples_per_second": 115.989,
261
+ "eval_steps_per_second": 7.259,
262
+ "step": 11484
263
+ },
264
+ {
265
+ "epoch": 11.02,
266
+ "grad_norm": 7.206885814666748,
267
+ "learning_rate": 1.3509361497839654e-06,
268
+ "loss": 0.1346,
269
+ "step": 11500
270
+ },
271
+ {
272
+ "epoch": 11.49,
273
+ "grad_norm": 5.869232177734375,
274
+ "learning_rate": 1.2789246279404704e-06,
275
+ "loss": 0.1292,
276
+ "step": 12000
277
+ },
278
+ {
279
+ "epoch": 11.97,
280
+ "grad_norm": 5.3356828689575195,
281
+ "learning_rate": 1.2069131060969755e-06,
282
+ "loss": 0.1212,
283
+ "step": 12500
284
+ },
285
+ {
286
+ "epoch": 12.0,
287
+ "eval_accuracy": 0.7868318122555411,
288
+ "eval_loss": 0.5986641645431519,
289
+ "eval_runtime": 28.927,
290
+ "eval_samples_per_second": 106.06,
291
+ "eval_steps_per_second": 6.637,
292
+ "step": 12528
293
+ },
294
+ {
295
+ "epoch": 12.45,
296
+ "grad_norm": 4.9229350090026855,
297
+ "learning_rate": 1.1349015842534805e-06,
298
+ "loss": 0.1127,
299
+ "step": 13000
300
+ },
301
+ {
302
+ "epoch": 12.93,
303
+ "grad_norm": 2.7797281742095947,
304
+ "learning_rate": 1.0628900624099857e-06,
305
+ "loss": 0.1147,
306
+ "step": 13500
307
+ },
308
+ {
309
+ "epoch": 13.0,
310
+ "eval_accuracy": 0.7891134289439374,
311
+ "eval_loss": 0.6010680794715881,
312
+ "eval_runtime": 26.5314,
313
+ "eval_samples_per_second": 115.637,
314
+ "eval_steps_per_second": 7.237,
315
+ "step": 13572
316
+ },
317
+ {
318
+ "epoch": 13.41,
319
+ "grad_norm": 6.798295497894287,
320
+ "learning_rate": 9.908785405664905e-07,
321
+ "loss": 0.1059,
322
+ "step": 14000
323
+ },
324
+ {
325
+ "epoch": 13.89,
326
+ "grad_norm": 7.220839500427246,
327
+ "learning_rate": 9.188670187229958e-07,
328
+ "loss": 0.0989,
329
+ "step": 14500
330
+ },
331
+ {
332
+ "epoch": 14.0,
333
+ "eval_accuracy": 0.795632333767927,
334
+ "eval_loss": 0.5935187339782715,
335
+ "eval_runtime": 26.5413,
336
+ "eval_samples_per_second": 115.593,
337
+ "eval_steps_per_second": 7.234,
338
+ "step": 14616
339
+ }
340
+ ],
341
+ "logging_steps": 500,
342
+ "max_steps": 20880,
343
+ "num_input_tokens_seen": 0,
344
+ "num_train_epochs": 20,
345
+ "save_steps": 500,
346
+ "total_flos": 3.6241217915288666e+19,
347
+ "train_batch_size": 32,
348
+ "trial_name": null,
349
+ "trial_params": null
350
+ }
checkpoint-14616/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61935e964cb014773997bd6603b4e4c4a0a91af98ad1de8925b113ef61252125
3
+ size 4920
checkpoint-17748/config.json ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_name_or_path": "google/vit-base-patch16-224",
3
+ "architectures": [
4
+ "ViTForImageClassification"
5
+ ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "encoder_stride": 16,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.0,
10
+ "hidden_size": 768,
11
+ "id2label": {
12
+ "0": "disguist",
13
+ "1": "anger",
14
+ "2": "fear",
15
+ "3": "happy",
16
+ "4": "surprise",
17
+ "5": "neutral",
18
+ "6": "sad"
19
+ },
20
+ "image_size": 224,
21
+ "initializer_range": 0.02,
22
+ "intermediate_size": 3072,
23
+ "label2id": {
24
+ "anger": 1,
25
+ "disguist": 0,
26
+ "fear": 2,
27
+ "happy": 3,
28
+ "neutral": 5,
29
+ "sad": 6,
30
+ "surprise": 4
31
+ },
32
+ "layer_norm_eps": 1e-12,
33
+ "model_type": "vit",
34
+ "num_attention_heads": 12,
35
+ "num_channels": 3,
36
+ "num_hidden_layers": 12,
37
+ "patch_size": 16,
38
+ "problem_type": "single_label_classification",
39
+ "qkv_bias": true,
40
+ "torch_dtype": "float32",
41
+ "transformers_version": "4.39.3"
42
+ }
checkpoint-17748/model.safetensors ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5c24f766742d6293755a3cbb94446ae1043b7ca2a1a63fb01258b0aa80584689
3
+ size 343239356
checkpoint-17748/optimizer.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bf56edad8d1de532c865621ada2fb5f32d3b501a9884d3bb823082344249c70d
3
+ size 686599610
checkpoint-17748/preprocessor_config.json ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "_valid_processor_keys": [
3
+ "images",
4
+ "do_resize",
5
+ "size",
6
+ "resample",
7
+ "do_rescale",
8
+ "rescale_factor",
9
+ "do_normalize",
10
+ "image_mean",
11
+ "image_std",
12
+ "return_tensors",
13
+ "data_format",
14
+ "input_data_format"
15
+ ],
16
+ "do_normalize": true,
17
+ "do_rescale": true,
18
+ "do_resize": true,
19
+ "image_mean": [
20
+ 0.5,
21
+ 0.5,
22
+ 0.5
23
+ ],
24
+ "image_processor_type": "ViTImageProcessor",
25
+ "image_std": [
26
+ 0.5,
27
+ 0.5,
28
+ 0.5
29
+ ],
30
+ "resample": 2,
31
+ "rescale_factor": 0.00392156862745098,
32
+ "size": {
33
+ "height": 224,
34
+ "width": 224
35
+ }
36
+ }
checkpoint-17748/rng_state.pth ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:cf287b8b3094e74830aac58c786d3a1ac1c694f32db1ad4cb77047b0ecd80fe3
3
+ size 14244
checkpoint-17748/scheduler.pt ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e7c85352a3ece7a837d6e30238104b0d0afe0a39d7fc4fc1f471824f396d1d1e
3
+ size 1064
checkpoint-17748/trainer_state.json ADDED
@@ -0,0 +1,419 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.5935187339782715,
3
+ "best_model_checkpoint": "facial_emotions_image_detection_rafdb_google_vit/checkpoint-14616",
4
+ "epoch": 17.0,
5
+ "eval_steps": 500,
6
+ "global_step": 17748,
7
+ "is_hyper_param_search": false,
8
+ "is_local_process_zero": true,
9
+ "is_world_process_zero": true,
10
+ "log_history": [
11
+ {
12
+ "epoch": 0.48,
13
+ "grad_norm": 4.908149719238281,
14
+ "learning_rate": 2.9351896303408547e-06,
15
+ "loss": 1.6796,
16
+ "step": 500
17
+ },
18
+ {
19
+ "epoch": 0.96,
20
+ "grad_norm": 6.714761257171631,
21
+ "learning_rate": 2.8631781084973597e-06,
22
+ "loss": 1.1697,
23
+ "step": 1000
24
+ },
25
+ {
26
+ "epoch": 1.0,
27
+ "eval_accuracy": 0.6166883963494133,
28
+ "eval_loss": 1.1752560138702393,
29
+ "eval_runtime": 43.7506,
30
+ "eval_samples_per_second": 70.125,
31
+ "eval_steps_per_second": 4.389,
32
+ "step": 1044
33
+ },
34
+ {
35
+ "epoch": 1.44,
36
+ "grad_norm": 5.284860134124756,
37
+ "learning_rate": 2.7911665866538647e-06,
38
+ "loss": 0.9317,
39
+ "step": 1500
40
+ },
41
+ {
42
+ "epoch": 1.92,
43
+ "grad_norm": 6.737391471862793,
44
+ "learning_rate": 2.7191550648103698e-06,
45
+ "loss": 0.7959,
46
+ "step": 2000
47
+ },
48
+ {
49
+ "epoch": 2.0,
50
+ "eval_accuracy": 0.6988265971316818,
51
+ "eval_loss": 0.9186690449714661,
52
+ "eval_runtime": 27.1747,
53
+ "eval_samples_per_second": 112.899,
54
+ "eval_steps_per_second": 7.065,
55
+ "step": 2088
56
+ },
57
+ {
58
+ "epoch": 2.39,
59
+ "grad_norm": 7.392563819885254,
60
+ "learning_rate": 2.6471435429668748e-06,
61
+ "loss": 0.6865,
62
+ "step": 2500
63
+ },
64
+ {
65
+ "epoch": 2.87,
66
+ "grad_norm": 6.488108158111572,
67
+ "learning_rate": 2.57513202112338e-06,
68
+ "loss": 0.6157,
69
+ "step": 3000
70
+ },
71
+ {
72
+ "epoch": 3.0,
73
+ "eval_accuracy": 0.7314211212516297,
74
+ "eval_loss": 0.798936128616333,
75
+ "eval_runtime": 26.6756,
76
+ "eval_samples_per_second": 115.012,
77
+ "eval_steps_per_second": 7.198,
78
+ "step": 3132
79
+ },
80
+ {
81
+ "epoch": 3.35,
82
+ "grad_norm": 6.2023773193359375,
83
+ "learning_rate": 2.503120499279885e-06,
84
+ "loss": 0.5444,
85
+ "step": 3500
86
+ },
87
+ {
88
+ "epoch": 3.83,
89
+ "grad_norm": 7.885190010070801,
90
+ "learning_rate": 2.43110897743639e-06,
91
+ "loss": 0.4788,
92
+ "step": 4000
93
+ },
94
+ {
95
+ "epoch": 4.0,
96
+ "eval_accuracy": 0.7496740547588006,
97
+ "eval_loss": 0.7347991466522217,
98
+ "eval_runtime": 26.7532,
99
+ "eval_samples_per_second": 114.678,
100
+ "eval_steps_per_second": 7.177,
101
+ "step": 4176
102
+ },
103
+ {
104
+ "epoch": 4.31,
105
+ "grad_norm": 6.879584312438965,
106
+ "learning_rate": 2.359097455592895e-06,
107
+ "loss": 0.4213,
108
+ "step": 4500
109
+ },
110
+ {
111
+ "epoch": 4.79,
112
+ "grad_norm": 5.687563896179199,
113
+ "learning_rate": 2.2870859337494e-06,
114
+ "loss": 0.3859,
115
+ "step": 5000
116
+ },
117
+ {
118
+ "epoch": 5.0,
119
+ "eval_accuracy": 0.7601043024771839,
120
+ "eval_loss": 0.6939254999160767,
121
+ "eval_runtime": 34.3214,
122
+ "eval_samples_per_second": 89.39,
123
+ "eval_steps_per_second": 5.594,
124
+ "step": 5220
125
+ },
126
+ {
127
+ "epoch": 5.27,
128
+ "grad_norm": 8.583109855651855,
129
+ "learning_rate": 2.215074411905905e-06,
130
+ "loss": 0.3529,
131
+ "step": 5500
132
+ },
133
+ {
134
+ "epoch": 5.75,
135
+ "grad_norm": 8.925189971923828,
136
+ "learning_rate": 2.14306289006241e-06,
137
+ "loss": 0.319,
138
+ "step": 6000
139
+ },
140
+ {
141
+ "epoch": 6.0,
142
+ "eval_accuracy": 0.7728161668839635,
143
+ "eval_loss": 0.6545543074607849,
144
+ "eval_runtime": 26.3981,
145
+ "eval_samples_per_second": 116.22,
146
+ "eval_steps_per_second": 7.273,
147
+ "step": 6264
148
+ },
149
+ {
150
+ "epoch": 6.23,
151
+ "grad_norm": 6.763670921325684,
152
+ "learning_rate": 2.071051368218915e-06,
153
+ "loss": 0.2914,
154
+ "step": 6500
155
+ },
156
+ {
157
+ "epoch": 6.7,
158
+ "grad_norm": 5.260948657989502,
159
+ "learning_rate": 1.9990398463754204e-06,
160
+ "loss": 0.2635,
161
+ "step": 7000
162
+ },
163
+ {
164
+ "epoch": 7.0,
165
+ "eval_accuracy": 0.7692307692307693,
166
+ "eval_loss": 0.6396156549453735,
167
+ "eval_runtime": 29.4081,
168
+ "eval_samples_per_second": 104.325,
169
+ "eval_steps_per_second": 6.529,
170
+ "step": 7308
171
+ },
172
+ {
173
+ "epoch": 7.18,
174
+ "grad_norm": 5.159913539886475,
175
+ "learning_rate": 1.9270283245319254e-06,
176
+ "loss": 0.2463,
177
+ "step": 7500
178
+ },
179
+ {
180
+ "epoch": 7.66,
181
+ "grad_norm": 6.131282806396484,
182
+ "learning_rate": 1.8550168026884303e-06,
183
+ "loss": 0.2254,
184
+ "step": 8000
185
+ },
186
+ {
187
+ "epoch": 8.0,
188
+ "eval_accuracy": 0.7829204693611473,
189
+ "eval_loss": 0.6089679598808289,
190
+ "eval_runtime": 26.3772,
191
+ "eval_samples_per_second": 116.313,
192
+ "eval_steps_per_second": 7.279,
193
+ "step": 8352
194
+ },
195
+ {
196
+ "epoch": 8.14,
197
+ "grad_norm": 9.69652271270752,
198
+ "learning_rate": 1.7830052808449353e-06,
199
+ "loss": 0.206,
200
+ "step": 8500
201
+ },
202
+ {
203
+ "epoch": 8.62,
204
+ "grad_norm": 19.338401794433594,
205
+ "learning_rate": 1.71099375900144e-06,
206
+ "loss": 0.189,
207
+ "step": 9000
208
+ },
209
+ {
210
+ "epoch": 9.0,
211
+ "eval_accuracy": 0.7842242503259452,
212
+ "eval_loss": 0.6003961563110352,
213
+ "eval_runtime": 26.7103,
214
+ "eval_samples_per_second": 114.862,
215
+ "eval_steps_per_second": 7.188,
216
+ "step": 9396
217
+ },
218
+ {
219
+ "epoch": 9.1,
220
+ "grad_norm": 7.628934383392334,
221
+ "learning_rate": 1.6389822371579451e-06,
222
+ "loss": 0.1867,
223
+ "step": 9500
224
+ },
225
+ {
226
+ "epoch": 9.58,
227
+ "grad_norm": 7.943005084991455,
228
+ "learning_rate": 1.5669707153144506e-06,
229
+ "loss": 0.1675,
230
+ "step": 10000
231
+ },
232
+ {
233
+ "epoch": 10.0,
234
+ "eval_accuracy": 0.7832464146023468,
235
+ "eval_loss": 0.6076902747154236,
236
+ "eval_runtime": 28.2446,
237
+ "eval_samples_per_second": 108.623,
238
+ "eval_steps_per_second": 6.798,
239
+ "step": 10440
240
+ },
241
+ {
242
+ "epoch": 10.06,
243
+ "grad_norm": 5.741328239440918,
244
+ "learning_rate": 1.4949591934709554e-06,
245
+ "loss": 0.154,
246
+ "step": 10500
247
+ },
248
+ {
249
+ "epoch": 10.54,
250
+ "grad_norm": 12.926642417907715,
251
+ "learning_rate": 1.4229476716274604e-06,
252
+ "loss": 0.1515,
253
+ "step": 11000
254
+ },
255
+ {
256
+ "epoch": 11.0,
257
+ "eval_accuracy": 0.7845501955671447,
258
+ "eval_loss": 0.5965496897697449,
259
+ "eval_runtime": 26.4509,
260
+ "eval_samples_per_second": 115.989,
261
+ "eval_steps_per_second": 7.259,
262
+ "step": 11484
263
+ },
264
+ {
265
+ "epoch": 11.02,
266
+ "grad_norm": 7.206885814666748,
267
+ "learning_rate": 1.3509361497839654e-06,
268
+ "loss": 0.1346,
269
+ "step": 11500
270
+ },
271
+ {
272
+ "epoch": 11.49,
273
+ "grad_norm": 5.869232177734375,
274
+ "learning_rate": 1.2789246279404704e-06,
275
+ "loss": 0.1292,
276
+ "step": 12000
277
+ },
278
+ {
279
+ "epoch": 11.97,
280
+ "grad_norm": 5.3356828689575195,
281
+ "learning_rate": 1.2069131060969755e-06,
282
+ "loss": 0.1212,
283
+ "step": 12500
284
+ },
285
+ {
286
+ "epoch": 12.0,
287
+ "eval_accuracy": 0.7868318122555411,
288
+ "eval_loss": 0.5986641645431519,
289
+ "eval_runtime": 28.927,
290
+ "eval_samples_per_second": 106.06,
291
+ "eval_steps_per_second": 6.637,
292
+ "step": 12528
293
+ },
294
+ {
295
+ "epoch": 12.45,
296
+ "grad_norm": 4.9229350090026855,
297
+ "learning_rate": 1.1349015842534805e-06,
298
+ "loss": 0.1127,
299
+ "step": 13000
300
+ },
301
+ {
302
+ "epoch": 12.93,
303
+ "grad_norm": 2.7797281742095947,
304
+ "learning_rate": 1.0628900624099857e-06,
305
+ "loss": 0.1147,
306
+ "step": 13500
307
+ },
308
+ {
309
+ "epoch": 13.0,
310
+ "eval_accuracy": 0.7891134289439374,
311
+ "eval_loss": 0.6010680794715881,
312
+ "eval_runtime": 26.5314,
313
+ "eval_samples_per_second": 115.637,
314
+ "eval_steps_per_second": 7.237,
315
+ "step": 13572
316
+ },
317
+ {
318
+ "epoch": 13.41,
319
+ "grad_norm": 6.798295497894287,
320
+ "learning_rate": 9.908785405664905e-07,
321
+ "loss": 0.1059,
322
+ "step": 14000
323
+ },
324
+ {
325
+ "epoch": 13.89,
326
+ "grad_norm": 7.220839500427246,
327
+ "learning_rate": 9.188670187229958e-07,
328
+ "loss": 0.0989,
329
+ "step": 14500
330
+ },
331
+ {
332
+ "epoch": 14.0,
333
+ "eval_accuracy": 0.795632333767927,
334
+ "eval_loss": 0.5935187339782715,
335
+ "eval_runtime": 26.5413,
336
+ "eval_samples_per_second": 115.593,
337
+ "eval_steps_per_second": 7.234,
338
+ "step": 14616
339
+ },
340
+ {
341
+ "epoch": 14.37,
342
+ "grad_norm": 7.945505619049072,
343
+ "learning_rate": 8.468554968795008e-07,
344
+ "loss": 0.0972,
345
+ "step": 15000
346
+ },
347
+ {
348
+ "epoch": 14.85,
349
+ "grad_norm": 2.5897045135498047,
350
+ "learning_rate": 7.748439750360057e-07,
351
+ "loss": 0.0915,
352
+ "step": 15500
353
+ },
354
+ {
355
+ "epoch": 15.0,
356
+ "eval_accuracy": 0.7940026075619296,
357
+ "eval_loss": 0.5987181663513184,
358
+ "eval_runtime": 26.6784,
359
+ "eval_samples_per_second": 114.999,
360
+ "eval_steps_per_second": 7.197,
361
+ "step": 15660
362
+ },
363
+ {
364
+ "epoch": 15.33,
365
+ "grad_norm": 5.564732074737549,
366
+ "learning_rate": 7.028324531925108e-07,
367
+ "loss": 0.0898,
368
+ "step": 16000
369
+ },
370
+ {
371
+ "epoch": 15.8,
372
+ "grad_norm": 2.869757890701294,
373
+ "learning_rate": 6.308209313490159e-07,
374
+ "loss": 0.0887,
375
+ "step": 16500
376
+ },
377
+ {
378
+ "epoch": 16.0,
379
+ "eval_accuracy": 0.7966101694915254,
380
+ "eval_loss": 0.6017541885375977,
381
+ "eval_runtime": 29.5882,
382
+ "eval_samples_per_second": 103.69,
383
+ "eval_steps_per_second": 6.489,
384
+ "step": 16704
385
+ },
386
+ {
387
+ "epoch": 16.28,
388
+ "grad_norm": 10.52423095703125,
389
+ "learning_rate": 5.588094095055209e-07,
390
+ "loss": 0.0812,
391
+ "step": 17000
392
+ },
393
+ {
394
+ "epoch": 16.76,
395
+ "grad_norm": 2.270158052444458,
396
+ "learning_rate": 4.867978876620259e-07,
397
+ "loss": 0.0857,
398
+ "step": 17500
399
+ },
400
+ {
401
+ "epoch": 17.0,
402
+ "eval_accuracy": 0.7985658409387223,
403
+ "eval_loss": 0.6071637272834778,
404
+ "eval_runtime": 32.54,
405
+ "eval_samples_per_second": 94.284,
406
+ "eval_steps_per_second": 5.9,
407
+ "step": 17748
408
+ }
409
+ ],
410
+ "logging_steps": 500,
411
+ "max_steps": 20880,
412
+ "num_input_tokens_seen": 0,
413
+ "num_train_epochs": 20,
414
+ "save_steps": 500,
415
+ "total_flos": 4.400719318285052e+19,
416
+ "train_batch_size": 32,
417
+ "trial_name": null,
418
+ "trial_params": null
419
+ }
checkpoint-17748/training_args.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:61935e964cb014773997bd6603b4e4c4a0a91af98ad1de8925b113ef61252125
3
+ size 4920