paul committed on
Commit
5059a70
1 Parent(s): ce01eb3

Training in progress, epoch 0

Browse files
all_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 20.0,
3
+ "total_flos": 7.403951867151974e+17,
4
+ "train_loss": 1.1767685549599785,
5
+ "train_runtime": 421.2758,
6
+ "train_samples_per_second": 82.749,
7
+ "train_steps_per_second": 0.332
8
+ }
config.json CHANGED
@@ -1,39 +1,36 @@
1
  {
2
- "_name_or_path": "microsoft/resnet-50",
3
  "architectures": [
4
- "ResNetForImageClassification"
5
- ],
6
- "depths": [
7
- 3,
8
- 4,
9
- 6,
10
- 3
11
- ],
12
- "downsample_in_first_stage": false,
13
- "embedding_size": 64,
14
- "hidden_act": "relu",
15
- "hidden_sizes": [
16
- 256,
17
- 512,
18
- 1024,
19
- 2048
20
  ],
 
 
 
 
 
21
  "id2label": {
22
  "0": "angry",
23
  "1": "happy",
24
  "2": "neutral",
25
  "3": "sad"
26
  },
 
 
 
27
  "label2id": {
28
  "angry": "0",
29
  "happy": "1",
30
  "neutral": "2",
31
  "sad": "3"
32
  },
33
- "layer_type": "bottleneck",
34
- "model_type": "resnet",
 
35
  "num_channels": 3,
 
 
36
  "problem_type": "single_label_classification",
 
37
  "torch_dtype": "float32",
38
  "transformers_version": "4.24.0.dev0"
39
  }
 
1
  {
2
+ "_name_or_path": "google/vit-base-patch16-224",
3
  "architectures": [
4
+ "ViTForImageClassification"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
5
  ],
6
+ "attention_probs_dropout_prob": 0.0,
7
+ "encoder_stride": 16,
8
+ "hidden_act": "gelu",
9
+ "hidden_dropout_prob": 0.0,
10
+ "hidden_size": 768,
11
  "id2label": {
12
  "0": "angry",
13
  "1": "happy",
14
  "2": "neutral",
15
  "3": "sad"
16
  },
17
+ "image_size": 224,
18
+ "initializer_range": 0.02,
19
+ "intermediate_size": 3072,
20
  "label2id": {
21
  "angry": "0",
22
  "happy": "1",
23
  "neutral": "2",
24
  "sad": "3"
25
  },
26
+ "layer_norm_eps": 1e-12,
27
+ "model_type": "vit",
28
+ "num_attention_heads": 12,
29
  "num_channels": 3,
30
+ "num_hidden_layers": 12,
31
+ "patch_size": 16,
32
  "problem_type": "single_label_classification",
33
+ "qkv_bias": true,
34
  "torch_dtype": "float32",
35
  "transformers_version": "4.24.0.dev0"
36
  }
preprocessor_config.json CHANGED
@@ -1,18 +1,17 @@
1
  {
2
- "crop_pct": 0.875,
3
  "do_normalize": true,
4
  "do_resize": true,
5
- "feature_extractor_type": "ConvNextFeatureExtractor",
6
  "image_mean": [
7
- 0.485,
8
- 0.456,
9
- 0.406
10
  ],
11
  "image_std": [
12
- 0.229,
13
- 0.224,
14
- 0.225
15
  ],
16
- "resample": 3,
17
  "size": 224
18
  }
 
1
  {
 
2
  "do_normalize": true,
3
  "do_resize": true,
4
+ "feature_extractor_type": "ViTFeatureExtractor",
5
  "image_mean": [
6
+ 0.5,
7
+ 0.5,
8
+ 0.5
9
  ],
10
  "image_std": [
11
+ 0.5,
12
+ 0.5,
13
+ 0.5
14
  ],
15
+ "resample": 2,
16
  "size": 224
17
  }
pytorch_model.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:9ef584acc2a5466b4b507dc258c6e8a8cae7e08b4c9e7e6348875e4be3c29d1b
3
- size 94389057
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:db408178bedaa6a5998bbb33212b113713f11eca99a0d912b0f908332844cca6
3
+ size 343273137
runs/Jan21_17-13-54_teesta/events.out.tfevents.1674301465.teesta.3779.0 CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:894350261429ceb0c9229a393e93365ccd297025f2982140e736457e8d3b65e2
3
- size 7630
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ac1fbc022de3170b19d2227e0b53ffe7cce1dc5b96de754201894eb433c5e931
3
+ size 15567
runs/Jan21_17-25-09_teesta/1674302123.4912503/events.out.tfevents.1674302123.teesta.7894.1 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:965e196ff0c4b59fda1585981e171208c301e747b37824dd74dc199cc67bee5d
3
+ size 5550
runs/Jan21_17-25-09_teesta/events.out.tfevents.1674302123.teesta.7894.0 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:2d9175c9096796091c491259ff069f27b0176aad150041384a2d7603a97c818f
3
+ size 4327
train_results.json ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "epoch": 20.0,
3
+ "total_flos": 7.403951867151974e+17,
4
+ "train_loss": 1.1767685549599785,
5
+ "train_runtime": 421.2758,
6
+ "train_samples_per_second": 82.749,
7
+ "train_steps_per_second": 0.332
8
+ }
trainer_state.json ADDED
@@ -0,0 +1,349 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "best_metric": 0.5711009174311926,
3
+ "best_model_checkpoint": "microsoft-resnet-50-cartoon-emotion-detection/checkpoint-133",
4
+ "epoch": 20.0,
5
+ "global_step": 140,
6
+ "is_hyper_param_search": false,
7
+ "is_local_process_zero": true,
8
+ "is_world_process_zero": true,
9
+ "log_history": [
10
+ {
11
+ "epoch": 1.0,
12
+ "eval_accuracy": 0.25688073394495414,
13
+ "eval_f1": 0.22448538034486407,
14
+ "eval_loss": 1.380691409111023,
15
+ "eval_precision": 0.234238207374522,
16
+ "eval_recall": 0.25688073394495414,
17
+ "eval_runtime": 6.8682,
18
+ "eval_samples_per_second": 63.481,
19
+ "eval_steps_per_second": 1.019,
20
+ "step": 7
21
+ },
22
+ {
23
+ "epoch": 1.43,
24
+ "learning_rate": 8.571428571428571e-05,
25
+ "loss": 1.3806,
26
+ "step": 10
27
+ },
28
+ {
29
+ "epoch": 2.0,
30
+ "eval_accuracy": 0.36009174311926606,
31
+ "eval_f1": 0.27057592620444176,
32
+ "eval_loss": 1.3620387315750122,
33
+ "eval_precision": 0.3636846359984286,
34
+ "eval_recall": 0.36009174311926606,
35
+ "eval_runtime": 6.2511,
36
+ "eval_samples_per_second": 69.748,
37
+ "eval_steps_per_second": 1.12,
38
+ "step": 14
39
+ },
40
+ {
41
+ "epoch": 2.86,
42
+ "learning_rate": 0.00011428571428571428,
43
+ "loss": 1.3591,
44
+ "step": 20
45
+ },
46
+ {
47
+ "epoch": 3.0,
48
+ "eval_accuracy": 0.3669724770642202,
49
+ "eval_f1": 0.24291286665824255,
50
+ "eval_loss": 1.3456730842590332,
51
+ "eval_precision": 0.5651041088181502,
52
+ "eval_recall": 0.3669724770642202,
53
+ "eval_runtime": 5.6933,
54
+ "eval_samples_per_second": 76.582,
55
+ "eval_steps_per_second": 1.23,
56
+ "step": 21
57
+ },
58
+ {
59
+ "epoch": 4.0,
60
+ "eval_accuracy": 0.3738532110091743,
61
+ "eval_f1": 0.2447258618599714,
62
+ "eval_loss": 1.3271173238754272,
63
+ "eval_precision": 0.7011109363353202,
64
+ "eval_recall": 0.3738532110091743,
65
+ "eval_runtime": 5.6438,
66
+ "eval_samples_per_second": 77.253,
67
+ "eval_steps_per_second": 1.24,
68
+ "step": 28
69
+ },
70
+ {
71
+ "epoch": 4.29,
72
+ "learning_rate": 0.00010476190476190477,
73
+ "loss": 1.3344,
74
+ "step": 30
75
+ },
76
+ {
77
+ "epoch": 5.0,
78
+ "eval_accuracy": 0.3922018348623853,
79
+ "eval_f1": 0.27454202142043693,
80
+ "eval_loss": 1.3076415061950684,
81
+ "eval_precision": 0.49126041854050495,
82
+ "eval_recall": 0.3922018348623853,
83
+ "eval_runtime": 5.7497,
84
+ "eval_samples_per_second": 75.83,
85
+ "eval_steps_per_second": 1.217,
86
+ "step": 35
87
+ },
88
+ {
89
+ "epoch": 5.71,
90
+ "learning_rate": 9.523809523809524e-05,
91
+ "loss": 1.3035,
92
+ "step": 40
93
+ },
94
+ {
95
+ "epoch": 6.0,
96
+ "eval_accuracy": 0.4197247706422018,
97
+ "eval_f1": 0.31223198015887665,
98
+ "eval_loss": 1.2836155891418457,
99
+ "eval_precision": 0.4326493440983179,
100
+ "eval_recall": 0.4197247706422018,
101
+ "eval_runtime": 6.0675,
102
+ "eval_samples_per_second": 71.858,
103
+ "eval_steps_per_second": 1.154,
104
+ "step": 42
105
+ },
106
+ {
107
+ "epoch": 7.0,
108
+ "eval_accuracy": 0.4426605504587156,
109
+ "eval_f1": 0.341877970822925,
110
+ "eval_loss": 1.2649825811386108,
111
+ "eval_precision": 0.49930570990337153,
112
+ "eval_recall": 0.4426605504587156,
113
+ "eval_runtime": 5.7091,
114
+ "eval_samples_per_second": 76.37,
115
+ "eval_steps_per_second": 1.226,
116
+ "step": 49
117
+ },
118
+ {
119
+ "epoch": 7.14,
120
+ "learning_rate": 8.571428571428571e-05,
121
+ "loss": 1.2692,
122
+ "step": 50
123
+ },
124
+ {
125
+ "epoch": 8.0,
126
+ "eval_accuracy": 0.4701834862385321,
127
+ "eval_f1": 0.3759489302967564,
128
+ "eval_loss": 1.2467858791351318,
129
+ "eval_precision": 0.47456408321388993,
130
+ "eval_recall": 0.4701834862385321,
131
+ "eval_runtime": 5.6802,
132
+ "eval_samples_per_second": 76.758,
133
+ "eval_steps_per_second": 1.232,
134
+ "step": 56
135
+ },
136
+ {
137
+ "epoch": 8.57,
138
+ "learning_rate": 7.619047619047618e-05,
139
+ "loss": 1.2271,
140
+ "step": 60
141
+ },
142
+ {
143
+ "epoch": 9.0,
144
+ "eval_accuracy": 0.47706422018348627,
145
+ "eval_f1": 0.38295063344511215,
146
+ "eval_loss": 1.2168828248977661,
147
+ "eval_precision": 0.4426797391546095,
148
+ "eval_recall": 0.47706422018348627,
149
+ "eval_runtime": 5.7354,
150
+ "eval_samples_per_second": 76.019,
151
+ "eval_steps_per_second": 1.22,
152
+ "step": 63
153
+ },
154
+ {
155
+ "epoch": 10.0,
156
+ "learning_rate": 6.666666666666667e-05,
157
+ "loss": 1.1897,
158
+ "step": 70
159
+ },
160
+ {
161
+ "epoch": 10.0,
162
+ "eval_accuracy": 0.4908256880733945,
163
+ "eval_f1": 0.40018913464018235,
164
+ "eval_loss": 1.1914395093917847,
165
+ "eval_precision": 0.4465401552610121,
166
+ "eval_recall": 0.4908256880733945,
167
+ "eval_runtime": 5.8725,
168
+ "eval_samples_per_second": 74.244,
169
+ "eval_steps_per_second": 1.192,
170
+ "step": 70
171
+ },
172
+ {
173
+ "epoch": 11.0,
174
+ "eval_accuracy": 0.5137614678899083,
175
+ "eval_f1": 0.4264259625764651,
176
+ "eval_loss": 1.1677685976028442,
177
+ "eval_precision": 0.6977269943419414,
178
+ "eval_recall": 0.5137614678899083,
179
+ "eval_runtime": 5.9101,
180
+ "eval_samples_per_second": 73.772,
181
+ "eval_steps_per_second": 1.184,
182
+ "step": 77
183
+ },
184
+ {
185
+ "epoch": 11.43,
186
+ "learning_rate": 5.714285714285714e-05,
187
+ "loss": 1.1449,
188
+ "step": 80
189
+ },
190
+ {
191
+ "epoch": 12.0,
192
+ "eval_accuracy": 0.5137614678899083,
193
+ "eval_f1": 0.42019701825907374,
194
+ "eval_loss": 1.1410157680511475,
195
+ "eval_precision": 0.4223060434667934,
196
+ "eval_recall": 0.5137614678899083,
197
+ "eval_runtime": 5.68,
198
+ "eval_samples_per_second": 76.761,
199
+ "eval_steps_per_second": 1.232,
200
+ "step": 84
201
+ },
202
+ {
203
+ "epoch": 12.86,
204
+ "learning_rate": 4.761904761904762e-05,
205
+ "loss": 1.1103,
206
+ "step": 90
207
+ },
208
+ {
209
+ "epoch": 13.0,
210
+ "eval_accuracy": 0.5435779816513762,
211
+ "eval_f1": 0.45463643608379634,
212
+ "eval_loss": 1.1230803728103638,
213
+ "eval_precision": 0.4453251266498374,
214
+ "eval_recall": 0.5435779816513762,
215
+ "eval_runtime": 5.6703,
216
+ "eval_samples_per_second": 76.892,
217
+ "eval_steps_per_second": 1.235,
218
+ "step": 91
219
+ },
220
+ {
221
+ "epoch": 14.0,
222
+ "eval_accuracy": 0.5527522935779816,
223
+ "eval_f1": 0.46754301809966226,
224
+ "eval_loss": 1.102858066558838,
225
+ "eval_precision": 0.4585875088630616,
226
+ "eval_recall": 0.5527522935779816,
227
+ "eval_runtime": 5.6798,
228
+ "eval_samples_per_second": 76.764,
229
+ "eval_steps_per_second": 1.232,
230
+ "step": 98
231
+ },
232
+ {
233
+ "epoch": 14.29,
234
+ "learning_rate": 3.809523809523809e-05,
235
+ "loss": 1.0763,
236
+ "step": 100
237
+ },
238
+ {
239
+ "epoch": 15.0,
240
+ "eval_accuracy": 0.5458715596330275,
241
+ "eval_f1": 0.4634974694985357,
242
+ "eval_loss": 1.0851385593414307,
243
+ "eval_precision": 0.6996862082871257,
244
+ "eval_recall": 0.5458715596330275,
245
+ "eval_runtime": 6.8058,
246
+ "eval_samples_per_second": 64.063,
247
+ "eval_steps_per_second": 1.029,
248
+ "step": 105
249
+ },
250
+ {
251
+ "epoch": 15.71,
252
+ "learning_rate": 2.857142857142857e-05,
253
+ "loss": 1.0496,
254
+ "step": 110
255
+ },
256
+ {
257
+ "epoch": 16.0,
258
+ "eval_accuracy": 0.5688073394495413,
259
+ "eval_f1": 0.48620627733329547,
260
+ "eval_loss": 1.0685973167419434,
261
+ "eval_precision": 0.5379831413851768,
262
+ "eval_recall": 0.5688073394495413,
263
+ "eval_runtime": 5.6811,
264
+ "eval_samples_per_second": 76.746,
265
+ "eval_steps_per_second": 1.232,
266
+ "step": 112
267
+ },
268
+ {
269
+ "epoch": 17.0,
270
+ "eval_accuracy": 0.5527522935779816,
271
+ "eval_f1": 0.4768952189953553,
272
+ "eval_loss": 1.0685114860534668,
273
+ "eval_precision": 0.597456441198868,
274
+ "eval_recall": 0.5527522935779816,
275
+ "eval_runtime": 5.7578,
276
+ "eval_samples_per_second": 75.723,
277
+ "eval_steps_per_second": 1.216,
278
+ "step": 119
279
+ },
280
+ {
281
+ "epoch": 17.14,
282
+ "learning_rate": 1.9047619047619046e-05,
283
+ "loss": 1.0178,
284
+ "step": 120
285
+ },
286
+ {
287
+ "epoch": 18.0,
288
+ "eval_accuracy": 0.5435779816513762,
289
+ "eval_f1": 0.4674479767067327,
290
+ "eval_loss": 1.0596745014190674,
291
+ "eval_precision": 0.5879862703091682,
292
+ "eval_recall": 0.5435779816513762,
293
+ "eval_runtime": 5.6723,
294
+ "eval_samples_per_second": 76.865,
295
+ "eval_steps_per_second": 1.234,
296
+ "step": 126
297
+ },
298
+ {
299
+ "epoch": 18.57,
300
+ "learning_rate": 9.523809523809523e-06,
301
+ "loss": 1.0127,
302
+ "step": 130
303
+ },
304
+ {
305
+ "epoch": 19.0,
306
+ "eval_accuracy": 0.5711009174311926,
307
+ "eval_f1": 0.49584804868136073,
308
+ "eval_loss": 1.0444973707199097,
309
+ "eval_precision": 0.6083837079452298,
310
+ "eval_recall": 0.5711009174311926,
311
+ "eval_runtime": 5.7997,
312
+ "eval_samples_per_second": 75.176,
313
+ "eval_steps_per_second": 1.207,
314
+ "step": 133
315
+ },
316
+ {
317
+ "epoch": 20.0,
318
+ "learning_rate": 0.0,
319
+ "loss": 0.9996,
320
+ "step": 140
321
+ },
322
+ {
323
+ "epoch": 20.0,
324
+ "eval_accuracy": 0.5642201834862385,
325
+ "eval_f1": 0.4813301262382135,
326
+ "eval_loss": 1.0442301034927368,
327
+ "eval_precision": 0.5786388539620291,
328
+ "eval_recall": 0.5642201834862385,
329
+ "eval_runtime": 5.7325,
330
+ "eval_samples_per_second": 76.058,
331
+ "eval_steps_per_second": 1.221,
332
+ "step": 140
333
+ },
334
+ {
335
+ "epoch": 20.0,
336
+ "step": 140,
337
+ "total_flos": 7.403951867151974e+17,
338
+ "train_loss": 1.1767685549599785,
339
+ "train_runtime": 421.2758,
340
+ "train_samples_per_second": 82.749,
341
+ "train_steps_per_second": 0.332
342
+ }
343
+ ],
344
+ "max_steps": 140,
345
+ "num_train_epochs": 20,
346
+ "total_flos": 7.403951867151974e+17,
347
+ "trial_name": null,
348
+ "trial_params": null
349
+ }
training_args.bin CHANGED
@@ -1,3 +1,3 @@
1
  version https://git-lfs.github.com/spec/v1
2
- oid sha256:22dfc15d863743abc224a6966344a36db76a2052ffbe6fa1ac53f6ac1fb592fc
3
  size 3439
 
1
  version https://git-lfs.github.com/spec/v1
2
+ oid sha256:876d7e78c817fe779bc15169c094ee092e47bccd8d6e39205df03f212a0caa14
3
  size 3439